ea4all-gradio-agents-mcp-hackathon-kickoff
- .gitignore +179 -0
- README.md +107 -1
- app.py +31 -5
- ea4all/__main__.py +18 -0
- ea4all/app_ea4all_agent.py +655 -0
- ea4all/ea4all_mcp.py +28 -0
- ea4all/ea4all_store/APM-ea4all (test-split).xlsx +0 -0
- ea4all/ea4all_store/apm_qna_mock.txt +4 -0
- ea4all/ea4all_store/dbr.txt +32 -0
- ea4all/ea4all_store/ea4all_overview.txt +96 -0
- ea4all/ea4all_store/reference_architecture_dbr_assistant.txt +9 -0
- ea4all/ea4all_store/reference_architecture_dbr_demo.txt +43 -0
- ea4all/ea4all_store/strategic_principles.txt +40 -0
- ea4all/main.py +6 -0
- ea4all/packages.txt +0 -0
- ea4all/src/__init__.py +4 -0
- ea4all/src/ea4all_apm/configuration.py +35 -0
- ea4all/src/ea4all_apm/graph.py +931 -0
- ea4all/src/ea4all_apm/prompts.py +292 -0
- ea4all/src/ea4all_apm/state.py +87 -0
- ea4all/src/ea4all_gra/configuration.py +46 -0
- ea4all/src/ea4all_gra/data.py +131 -0
- ea4all/src/ea4all_gra/graph.py +409 -0
- ea4all/src/ea4all_gra/state.py +151 -0
- ea4all/src/ea4all_gra/togaf_task1/graph.py +125 -0
- ea4all/src/ea4all_gra/togaf_task1/state.py +53 -0
- ea4all/src/ea4all_gra/togaf_task2/graph.py +441 -0
- ea4all/src/ea4all_gra/togaf_task2/state.py +49 -0
- ea4all/src/ea4all_gra/togaf_task3/graph.py +280 -0
- ea4all/src/ea4all_gra/togaf_task3/state.py +66 -0
- ea4all/src/ea4all_gra/utils.py +125 -0
- ea4all/src/ea4all_indexer/__init__.py +5 -0
- ea4all/src/ea4all_indexer/configuration.py +22 -0
- ea4all/src/ea4all_indexer/graph.py +57 -0
- ea4all/src/ea4all_indexer/state.py +44 -0
- ea4all/src/ea4all_vqa/configuration.py +42 -0
- ea4all/src/ea4all_vqa/graph.py +405 -0
- ea4all/src/ea4all_vqa/state.py +64 -0
- ea4all/src/graph.py +254 -0
- ea4all/src/shared/__init__.py +1 -0
- ea4all/src/shared/configuration.py +161 -0
- ea4all/src/shared/prompts.py +393 -0
- ea4all/src/shared/state.py +84 -0
- ea4all/src/shared/utils.py +478 -0
- ea4all/src/shared/vectorstore.py +196 -0
- ea4all/src/tools/tools.py +105 -0
.gitignore
ADDED
@@ -0,0 +1,179 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so
.DS_Store

# Projects hosted @Hugging Face
../ea4all-agentic-live/ # EA4ALL Agentic Live
../ea4all-agentic-system/ # EA4ALL Agentic System
../ea4all-agentic-staging/ # EA4ALL Agentic Build/Test

# EA4ALL artifacts
*.wav
*.png
*.faiss
*.pkl
togaf_runway_*

# Langchain / Langgraph
.langgraph_api/
lgs-dev-start

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.envrc
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
README.md
CHANGED
@@ -11,4 +11,110 @@ license: apache-2.0
short_description: Enterprise Architecture Agentic system exposed as Gradio MCP
---

## Architect Agentic Companion



## Background

- `Trigger`: How disruptive may Generative AI be for the Enterprise Architecture capability (People, Process and Tools)?
- `Motivation`: Master GenAI while disrupting Enterprise Architecture, empowering individuals and organisations with the ability to harness EA value and make people's lives better, safer and more efficient.
- `Ability`: Exploit my career background and skillset across system development, business acumen, innovation and architecture to accelerate GenAI exploration while learning new things.

> That's how the `EA4ALL-Agentic system` was born, and it has been evolving ever since to build an ecosystem of **Architect Agent partners**.

## Benefits

- `Empower individuals with knowledge`: understand and talk about Business and Technology strategy, the IT landscape and Architecture artefacts in a single click of a button.
- `Increase efficiency and productivity`: generate a documented architecture with diagram, model and descriptions. Accelerate Business Requirement identification and translation into a Target Reference Architecture. Automate steps and reduce task execution time.
- `Improve agility`: plan, execute, review and iterate over EA inputs and outputs. Increase the ability to adapt, transform and execute at pace and scale in response to changes in strategy, threats and opportunities.
- `Increase collaboration`: democratise architecture work and knowledge with anyone using natural language.
- `Cost optimisation`: intelligent allocation of architects' time to valuable business tasks.
- `Business growth`: creation / re-use of (new) products and services, and enhanced people experience.
- `Resilience`: assess whether solutions are secure by design, what risks they pose and how to mitigate them, and apply best practices.
- `Streamline`: the process of managing and utilising architectural knowledge and tools in a user-friendly way.

## Knowledge context

Synthetic datasets are used to exemplify the Agentic System capabilities.

### IT Landscape Question and Answering

- Application name
- Business fit: appropriate, inadequate, perfect
- Technical fit: adequate, insufficient, perfect
- Business_criticality: operational, medium, high, critical
- Roadmap: maintain, invest, divest
- Architect responsible
- Hosting: user device, on-premise, IaaS, SaaS
- Business capability
- Business domain
- Description

- Bring Your Own Data: upload your own IT landscape data (a loading sketch follows this list)
  - Application Portfolio Management
  - xlsx tabular format
  - first row (header) with the field names (columns)
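A minimal sketch of what an uploaded IT landscape workbook is expected to look like when read with pandas. The file name and the validation step below are illustrative assumptions for this README, not the app's own loader (the Space loads its bundled sample through `vectorstore.apm_dataframe_loader`):

```python
# Sketch: read a "bring your own data" APM workbook whose first row carries the column names.
import pandas as pd

df = pd.read_excel("my_it_landscape.xlsx", header=0)  # hypothetical file name

# Sanity-check the header row against the fields listed above.
expected = {
    "Application name", "Business fit", "Technical fit", "Business_criticality",
    "Roadmap", "Architect responsible", "Hosting", "Business capability",
    "Business domain", "Description",
}
missing = expected - set(df.columns)
if missing:
    raise ValueError(f"Missing columns: {sorted(missing)}")

print(df.head())
```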
### Architecture Diagram Visual Question and Answering

- Architecture Visual Artefacts
  - jpeg, png

**Disclaimer**
- Your data & image are not accessible to or shared with anyone else, nor used for training purposes.
- The EA4ALL-VQA Agent should be used ONLY FOR Architecture Diagram images.
- This feature should NOT BE USED to process inappropriate content.

### Reference Architecture Generation

- Clock in/out Use-case

## Log / Traceability

For the purpose of continuous improvement, agentic workflow runs are logged.

## Architecture

*Core architecture built upon Python, Langchain, Langgraph, Langsmith, and Gradio.*

- Python
  - Pandas
- Langchain
- Langgraph
- Huggingface
- CrewAI

- RAG (Retrieval Augmented Generation)
  - Vectorstore

- Prompt Engineering
  - Strategy & tactics: Task / Sub-tasks
  - Agentic Workflow

- Models:
  - OpenAI
  - Meta/Llama
  - Google Gemini

- Hierarchical-Agent-Teams (a minimal supervisor sketch follows this list):
  - Tabular question answering over your own document
  - Supervisor
  - Visual Question Answering
  - Diagram Component Analysis
  - Risk & Vulnerability and Mitigation options
  - Well-Architected Design Assessment
  - Vision and Target Architecture
  - Architect Demand Management

- User Interface
  - Gradio

- Observability & Evaluation
  - Langsmith

- Hosting
  - Huggingface Space

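The Hierarchical-Agent-Teams item above is, at its core, a supervisor that routes each request to a specialised team. Below is a minimal, self-contained LangGraph sketch of that pattern; the node names and the keyword-based routing are illustrative assumptions, not the actual graphs under `ea4all/src/`:

```python
# Sketch: supervisor routing between two illustrative teams, in the spirit of the
# hierarchical-agent-teams item above (the real system routes with an LLM, not keywords).
from typing import TypedDict

from langgraph.graph import StateGraph, START, END


class State(TypedDict):
    question: str
    answer: str


def supervisor(state: State) -> State:
    return state  # placeholder: the real supervisor asks an LLM to pick the next team


def route(state: State) -> str:
    # Toy heuristic standing in for the supervisor's decision.
    return "vqa_team" if "diagram" in state["question"].lower() else "landscape_team"


def landscape_team(state: State) -> State:
    return {**state, "answer": "answered from the IT landscape (RAG over the APM dataset)"}


def vqa_team(state: State) -> State:
    return {**state, "answer": "answered from the architecture diagram (multimodal model)"}


builder = StateGraph(State)
builder.add_node("supervisor", supervisor)
builder.add_node("landscape_team", landscape_team)
builder.add_node("vqa_team", vqa_team)
builder.add_edge(START, "supervisor")
builder.add_conditional_edges("supervisor", route, {"landscape_team": "landscape_team", "vqa_team": "vqa_team"})
builder.add_edge("landscape_team", END)
builder.add_edge("vqa_team", END)
graph = builder.compile()

print(graph.invoke({"question": "What applications support the marketing domain?", "answer": ""}))
```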
Check out the configuration reference at [spaces-config-reference](https://huggingface.co/docs/hub/spaces-config-reference)
app.py
CHANGED
@@ -1,7 +1,33 @@
#import gradio as gr

#def greet(name):
#    return "Hello " + name + "!!"

#demo = gr.Interface(fn=greet, inputs="text", outputs="text")
#demo.launch()

##version 2025-05-17
# LangChain environment variables
from pathlib import Path
import sys, os

if __name__ == '__main__':

    current_path = Path.cwd()
    sys.path.append(os.path.join(str(current_path), 'ea4all', 'src'))

    print(f"Current path: {current_path} \n Parent {current_path.parent} \n Root path: {str(Path.cwd())}")

    #Set environment variables for build deployment (local run)
    ea4all_stage = os.environ["EA4ALL_ENV"]
    if ea4all_stage in ('MCP',):  # tuple, not a bare string, so only an exact 'MCP' match passes
        project_name = "ea4all-gradio-agent-mcp-hackathon"
        runname = "ea4all-gradio-agent-mcp-hackathon-run"
        os.environ["LANGCHAIN_PROJECT"] = project_name # Optional: "default" is used if not set
        os.environ['LANGCHAIN_RUNNAME'] = runname
        os.environ['EA4ALL_ENV'] = ea4all_stage

    #ea4all-agent-entry-point
    from ea4all.__main__ import main
    main()
ea4all/__main__.py
ADDED
@@ -0,0 +1,18 @@
from ea4all.app_ea4all_agent import ea4all_agentUI as ea4all_mcp
import os

def main() -> None:
    #Launch UI
    try:
        ea4all_mcp.launch(
            server_name=os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
            server_port=int(os.environ["GRADIO_SERVER_PORT"]) if os.getenv("GRADIO_SERVER_PORT") else None,  # launch() expects an int port
            debug=os.getenv("GRADIO_DEBUG", True),
            ssr_mode=False,
            mcp_server=True,
        )
    except Exception as e:
        print(f"Error loading: {e}")

if __name__ == "__main__":
    main()
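`main()` above starts the Gradio app with `mcp_server=True`, so the same Space doubles as an MCP server. A rough sketch of how a client could list the exposed tools is shown below; it assumes the reference `mcp` Python SDK and Gradio's documented `/gradio_api/mcp/sse` endpoint on a locally running instance, neither of which is defined by this commit:

```python
# Sketch: connect to the locally running Space's MCP endpoint and list its tools.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def list_ea4all_tools() -> None:
    # URL and endpoint path are assumptions (Gradio's default local port and documented MCP route).
    async with sse_client("http://127.0.0.1:7860/gradio_api/mcp/sse") as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])


asyncio.run(list_ea4all_tools())
```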
ea4all/app_ea4all_agent.py
ADDED
@@ -0,0 +1,655 @@
#Added agentic-workflow-collaboration-agents
#Multimodal ChatInterface - not working
#Added new QA Tab
#Added new agent Well-Architected
#Added Supervisor Agent workflow
#ISSUE with VQA fixed
#LLMChain refactored
#Updated with changes as result of ea4all_agent Gradio Space deployment issues
#VQA Safeguardings - JPEG, PNG images only
#Deployed version to Live
#Library import refactoring, ea4all-architecture, empty message
#Bring your own IT Landscape data: discontinued
#Added upload your Business Requirement
#Load user's APM - disabled 2024-06-22
#TEST E2E Togaf Agentic system 2024-06-24
#MIGRATION TO HF Open Source using TGI and Meta-Llama-3-8B-Instruct 2024-06-25
#ADDED GENERATE_ARCHITECTURE_RUNWAY diagram: graphviz 2024-07-03
#REFERENCE ARCHITECTURE DYNAMIC TABS 2024-07-05
#ADDED Business Query grader 2024-07-07
#RCA Togaf Supervisor: increase reliability 2024-07-08 - ISSUE FIXED BY NOW
#EA4ALL-agentic-system-container updated 2024-07-10
###APM Agentic system: 2024-07-25 - Safety check added
##Sub-graph node stream 2024-07-26
# Stream arbitrary nested content: https://langchain-ai.github.io/langgraph/how-tos/streaming-content/
## Prompt refinement task_router, user_question_routing, prompt_category 2024-07-27
## WebSearch Hallucination issue - recursion looping - solution: routing to route_question 2024-07-28
## Safety_check greetings, msgs, APM Sample Dataset 2024-09-29
# VQA issue - image not recognised 2024-07-30
# Constants IMAGES (Architecture, Overview) 2024-07-31
# About, QnA Examples moved to mock files 2024-08-01 - deployed to build
## 2024-08-03: VQA Streaming, Diagrams' EDGE nodes changed to END - one task at a time
## VQA Llama-3.2-11B-Vision-Instruct 2024-10-25
#RELEASE 2024-11-15
## CHANGES 2024-11-22
# MIGRATION to Gradio 5
# Chatbot UI migrated to gr.Chatbot
# run_qna_agentic_system, run_vqa_agentic_system updated: ChatMessage, chat_memory, UI events
# chat_memory VQA missing image - fixed - needs improvement
## RELEASE 2024-11-23
#pip freeze > requirements.txt to keep libraries synched local and HF Spaces
#gr.Image issue: caused by __main__ root_path=str(Path.cwd())
## RELEASE 2024-12-09
#Confluence Integration
#Llama-3.2-11B-Vision-Instruct max_token issue <=4096 stills
#Safety-check refinement
#TOGAF Vision streaming
## Release update 2024-12-11
#EA4ALL Podcast
#2025-02-03 RELEASE V1
##RETROFIT & INTEGRATION w/ EA4ALL-dev-studio-structure
#2025-02-09
##UAT EA4ALL-LGS-RETRIEVER-REFACTORED
#2025-03-10
##AI-Assistant-UI-Message-Stream refactor
#2025-12-04
## Add EA4ALL-PMO-Demand-Management CrewAI Agents
#2025-05-06
## Add MCP Server
#2025-05-17
## Added PMO_MOCK_QNA examples
## get_relevant_questions() - moved to utils, constants moved to configuration
#2025-05-19
## EA4ALL Diagram refactored, vqa_max_tokens updated
from langchain.callbacks.tracers import LangChainTracer
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain_community.document_loaders import ConfluenceLoader
from langchain_core.messages import HumanMessage
from langsmith import Client

from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
from ea4all.src.shared import vectorstore
from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
import ea4all.src.ea4all_apm.graph as e4a
import ea4all.src.ea4all_vqa.graph as e4v
import ea4all.src.ea4all_gra.graph as e4t
import ea4all.src.shared.utils as e4u
from ea4all.src.ea4all_indexer.graph import indexer_graph
from ea4all.src.graph import super_graph
from ea4all.src.pmo_crew.crew_runner import run_pmo_crew

import uuid
import os
import pandas as pd

import gradio as gr
from gradio import ChatMessage
import time
from PIL import Image

#Set LangSmith project
tracer = LangChainTracer(project_name=os.getenv('LANGCHAIN_PROJECT'))

config = {
    "run_name": os.getenv('LANGCHAIN_RUNNAME'),
    "tags": [os.getenv('EA4ALL_ENV')],
    "callbacks": [tracer],
    "recursion_limit": 25,
    "configurable": {
        "thread_id": uuid.uuid4(),
    },
    "stream_mode": "messages"
}

#Blocks w/ ChatInterface, BYOD, About
with gr.Blocks(title="Your ArchitectGPT", fill_height=True, fill_width=True) as ea4all_agentUI:

    agentic_pmo_desc = """
    Hi,
    Provide project resource estimation for architecture work based on business requirements, skillset,
    architects allocation, and any other relevant information to enable successful project solution delivery."""

    agentic_qna_desc = """
    Hi,
    improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
    As an Enterprise Architect Agentic System I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """

    agentic_vqa_desc = """
    Hi, talk to your Architecture Diagram using natural language. Gain rapid knowledge and insights translating image to meaningful description.
    **Disclaimer**:
    - This feature should NOT BE USED to process inappropriate content, but ONLY FOR Architecture Diagrams
    """

    agentic_togaf_desc = """
    Hi,
    in a click of a button create a reference architecture that serves as a blueprint for designing and implementing IT solutions.
    Standardise, increase efficiency and productivity to architecture solution development.
    Generate context-specific reference and minimal viable architectures to support business and IT strategy and digital transformation.
    Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
    """

    #ea4all-about
    def ea4all_about():
        readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
        return readme

    #Load demo business requirements
    def init_dbr():
        # Open the file in read mode ('r')
        with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
            # Read the contents of the file
            contents = file.read()
        return contents

    def init_df():
        return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))

    #load core-architecture image
    #fix the issue with gr.Image(path) inside a docker container
    def get_image(_image):
        # Load an image
        image = e4u._join_paths(BaseConfiguration.ea4all_images, _image)
        print(f"Full path: {image}")

        return image

    #ea4all-qna-agent-conversational-with-memory
    async def run_qna_agentic_system(prompt, chat_memory, request: gr.Request):

        format_response = ""

        if not prompt:
            format_response = "Hi, how are you today? To start our conversation, please chat your message!"
            chat_memory.append(ChatMessage(role="assistant", content=format_response))
            yield chat_memory

        if not chat_memory:
            chat_memory.append(ChatMessage(role="user", content=prompt))
            yield chat_memory

        if prompt:
            #capture user ip
            ea4all_user = e4u.get_user_identification(request)

            ##Initialise APM Graph
            #apm_graph = e4a.apm_graph
            #inputs = {"question": prompt, "chat_memory": chat_memory}
            inputs = {"messages": [{"role": "user", "content": prompt}]}

            #add prompt to memory
            chat_memory.append(ChatMessage(role="user", content=prompt))

            partial_message = ""
            async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
                kind = event["event"]
                tags = event.get("tags", [])
                name = event['name']

                if name == "safety_check":
                    if kind == "on_chain_stream":
                        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {event['data']['chunk']['safety_status'][0]}"))
                        if event['data']['chunk']['safety_status'][0] == 'no':
                            chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {event['data']['chunk']['safety_status'][1]}"))
                        yield chat_memory
                if kind == "on_chain_end" and name == "route_question":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {event['data']['output']['source']}"))
                    yield chat_memory
                if kind == "on_chain_start" and name == "retrieve":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
                    yield chat_memory
                if kind == "on_chain_start" and name in ("generate_web_search", "websearch", "stream_generation"):
                    chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`\n\n"))
                    yield chat_memory
                if kind == "on_chain_stream" and name == "stream_generation":
                    data = event["data"]
                    # Accumulate the chunk of data
                    partial_message += data['chunk']
                    chat_memory[-1].content = partial_message
                    time.sleep(0.05)
                    yield chat_memory
                if name == "grade_generation_v_documents_and_question":
                    if kind == "on_chain_start":
                        chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
                        yield chat_memory
                    if kind == "on_chain_end":
                        chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data']['input'].source}:` {event['data']['output']}"))
                        yield chat_memory
                if "stream_hallucination" in tags and kind == "on_chain_start":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
                    yield chat_memory
                if "stream_grade_answer" in tags and kind == "on_chain_start":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
                    yield chat_memory
                if name == "supervisor":
                    if kind == "on_chain_start":
                        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
                        yield chat_memory
                    if kind == "on_chain_stream":
                        chat_memory.append(ChatMessage(role="assistant", content=f"{event['data']['chunk']}"))
                        yield chat_memory

            os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
            wait_for_all_tracers()

    #Trigger Solution Architecture Diagram QnA
    async def run_vqa_agentic_system(message, chat_memory, request: gr.Request):
        """Handle file uploads and validate their types."""
        #capture user ip
        ea4all_user = e4u.get_user_identification(request)

        allowed_file_types = ('JPEG', 'PNG')

        print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
        print(f"Prompt: {message}")

        if message['files'] == []:
            chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
            yield chat_memory
        else:
            diagram = message['files'][-1]  ##chat_memory[-1]['content'][-1]
            msg = message['text']  ##chat_memory[-2]['content']
            print(f"---DIAGRAM: {diagram}---")
            try:
                if msg == "":
                    msg = "Please describe this diagram."

                with Image.open(diagram) as diagram_:
                    if diagram_.format not in allowed_file_types:
                        chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
                        yield chat_memory
                    else:
                        #vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
                        vqa_image = diagram

                        #Setup enter graph
                        diagram_graph = e4v.diagram_graph

                        partial_message = ""
                        chat_memory.append(ChatMessage(role="assistant", content="Hi, I am working on your question..."))
                        async for event in diagram_graph.astream_events(
                            {"question": msg, "image": vqa_image}, config, version="v2"
                        ):
                            if (
                                event["event"] == "on_chat_model_stream"
                                and "vqa_stream" in event['tags']
                            ):
                                partial_message += event["data"]["chunk"].content
                                chat_memory[-1].content = partial_message
                                time.sleep(e4u.CFG.STREAM_SLEEP)
                                yield chat_memory
                            elif not partial_message:
                                yield chat_memory

                        os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
                        wait_for_all_tracers()

            except Exception as e:
                yield (e.args[-1])

    #Run Togaf Agentic System
    async def run_reference_architecture_agentic_system(business_query, request: gr.Request):

        if len(business_query) < 50:
            agent_response = "Please provide a valid Business Requirement content to start!"
            yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
        else:
            plain_text = e4u.markdown_to_plain_text(business_query)
            agent_response = "Generating Architecture Blueprint ---TOGAF VISION TARGET--- \n\nI am working on your request..."
            togaf_chain = e4t.togaf_graph
            final_diagram = ""
            vision_message = ""
            try:
                async for s in togaf_chain.astream_events(
                    {
                        "messages": [
                            HumanMessage(
                                content=plain_text
                            )
                        ],
                        "business_query": business_query,
                    },
                    config=config,
                    version="v2"
                ):
                    kind = s["event"]
                    tags = s.get("tags", [])
                    name = s['name']

                    if "gra_stream" in tags and name == "stream_vision_target":
                        if kind == "on_chain_stream":
                            data = s["data"]
                            # Accumulate the chunk of data
                            vision_message += data['chunk'].content
                            time.sleep(e4u.CFG.STREAM_SLEEP)
                            yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
                    elif name == "save_diagram" and kind == 'on_chain_end':  #MOVED INTO Togaf_Task3
                        final_diagram = s['data']['output']['architecture_runway']
                    elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start':
                        agent_response += f"\n\n`{tags[-1]}:{name}`"
                        yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])

                if vision_message == "":
                    agent_response = "I cannot generate the Architecture Vision. Please provide a valid Business Requirement content to start!"
                    yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
                elif "Error" not in final_diagram:
                    yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), final_diagram, None, gr.Tabs(visible=True)])
                else:
                    yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, final_diagram, gr.Tabs(visible=True)])

            except Exception as e:
                yield(
                    [
                        e.args[-1],
                        gr.Tabs(visible=True),
                        gr.Tabs(selected="id_togaf"),
                        None,
                        None,
                        gr.Tabs(visible=False)
                    ]
                )

    async def run_pmo_agentic_system(prompt, chat_memory):
        """
        Answer a question about Project Portfolio Management and Architect Demand Management.

        Args:
            prompt (str): The project portfolio user question
            chat_memory (list): The tool message history

        Returns:
            str: A summary answering the user question
        """
        format_response = ""

        if not prompt:
            format_response = "Hi, how are you today? To start our conversation, please chat your message!"
            chat_memory.append(ChatMessage(role="assistant", content=format_response))
            yield chat_memory

        if not chat_memory:
            chat_memory.append(ChatMessage(role="user", content=prompt))
            yield chat_memory

        inputs = {
            "question": prompt,
            "verbose": True,  # optional flags
        }

        yield run_pmo_crew(inputs)

    def ea4all_confluence():

        #Confluence API Key
        confluence_api_key = os.environ['CONFLUENCE_API_KEY']

        loader = ConfluenceLoader(
            url="https://learnitall.atlassian.net/wiki", username="learn-it-all@outlook.com", api_key=confluence_api_key,
            space_key="~71202000cd55f36336455f8c07afa1860ba810",
            include_attachments=False, limit=10,
            keep_markdown_format=True
        )

        documents = loader.load()

        data = {
            "title": [doc.metadata["title"] for doc in documents],
            "source": [doc.metadata["source"] for doc in documents],
            "page_content": [doc.page_content for doc in documents],
        }

        df = pd.DataFrame(data)

        return df

    def filter_page(page_list, title):
        x = page_list[page_list["title"] == title]
        return x.iloc[0]['page_content']

    #EA4ALL-Agentic system menu
    with gr.Tabs(selected="how_to") as tabs:
        with gr.Tab(label="Architect Demand Management"):
            with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
                ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
                pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                with gr.Accordion("Open for prompt examples", open=False):
                    pmo_examples = gr.Dropdown(e4u.get_relevant_questions(PMO_MOCK_QNA), value=None, label="Questions", interactive=True)
                gr.ClearButton([pmo_chatbot, pmo_prompt], value="Clear", size="sm", visible=False)
            with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
                pmo_df = gr.Dataframe()
        with gr.Tab(label="Application Landscape QnA"):
            with gr.Tabs() as tabs_apm_qna:
                with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
                    ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
                    ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                    qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                    with gr.Accordion("Open for prompt examples", open=False):
                        qna_examples = gr.Dropdown(e4u.get_relevant_questions(APM_MOCK_QNA), value=None, label="Questions", interactive=True)
                    gr.ClearButton([ea4all_chatbot, qna_prompt], value="Clear", size="sm", visible=False)
                with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
                    apm_df = gr.Dataframe()
        with gr.Tab(label="Diagram Question and Answering"):
            gr.Markdown(value=agentic_vqa_desc)
            ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
            vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
            with gr.Accordion("Open for prompt examples", open=False):
                vqa_examples = gr.Dropdown(e4u.get_vaq_examples(), value=None, label="Diagram and Questions", interactive=True)
            gr.ClearButton([ea4all_vqa, vqa_prompt, vqa_examples], value="Clear", size="sm", visible=True)
        with gr.Tab(label="Reference Architecture", id="id_refarch"):
            with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
                with gr.Tab(label='Business Requirement', id="id_dbr"):
                    gr.Markdown(value=agentic_togaf_desc)
                    dbr_text = gr.TextArea(value=init_dbr, lines=14, interactive=True)
                    with gr.Row():
                        dbr_file = gr.File(
                            value=e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock),
                            label="Business Requirement",
                            height=35,
                            show_label=False,
                            file_count="single",
                            file_types=['text'],
                            interactive=True,
                            type='binary'
                        )
                        dbr_run = gr.Button(scale=None, value="Run Reference Architecture")
                        dbr_cls = gr.ClearButton([dbr_file, dbr_text])
                with gr.Tab(label='Confluence Integration', id="id_confluence"):
                    confluence_list = gr.Dropdown(value=None, label="Confluence Pages", interactive=True)
                    confluence_df = gr.DataFrame(visible=False, headers=["title", "source", "page_content"])
                    @gr.render(inputs=[confluence_list, confluence_df])
                    def show_page(page, df):
                        if page:
                            with gr.Row():
                                with gr.Column():
                                    dbr_confluence = gr.Button(scale=None, value="Run Reference Architecture")
                                with gr.Column():
                                    btn = gr.Button("Clear")
                            with gr.Row(variant='default', show_progress=True):
                                page_content = gr.Markdown(filter_page(df, page), line_breaks=True)
                            btn.click(lambda: gr.Dropdown(value=None), None, confluence_list)
                            dbr_confluence.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[page_content], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])
                with gr.Tab(label='Reference Architecture', visible=False, id="id_togaf") as tabs_togaf:
                    togaf_vision = gr.Markdown(value='### Reference Architecture: Vision and Target')
                with gr.Tab(label="Target Architecture Runway", visible=False, id="id_runway") as tab_diagram:
                    diagram_header = gr.Markdown(visible=True)
                    architecture_runway = gr.Image(label="Target Architecture Runway", interactive=False, visible=True, scale=10)
        with gr.Tab(label="Overview", id="how_to"):
            gr.Markdown(
                """
                # Title

                **Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` to positively impact individuals and organisations.\n

                ## Overview
                """
            ),
            gr.Image(
                get_image(e4u.CFG.EA4ALL_ARCHITECTURE),
                show_download_button=False,
                container=False,
                show_share_button=False,
            )
            gr.Markdown(
                """
                ## Journey

                Audio overview summarising the key learnings, challenges, so what, stats from day-1 to last sprint. (**Powered by Google NoteBookLM**)

                """
            )
            podcast = gr.Audio(
                type="filepath",
                value=os.path.join(BaseConfiguration.ea4all_store, e4u.CFG.EA4ALL_PODCAST),
                label="EA4ALL Journey Podcast",
                show_download_button=False,
                autoplay=False,
                container=True,
                interactive=False,
            )
            gr.Markdown(ea4all_about)

    #get LLM response user's feedback
    def get_user_feedback(evt: gr.SelectData, request: gr.Request):
        ##{evt.index} {evt.value} {evt._data['liked']}
        try:
            uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".", "_")]
            gr.Info("Thanks for your feedback - run_id: " + uuid_str)
            run_id = uuid.UUID(uuid_str)
            client = Client()
            client.create_feedback(
                run_id,
                key="feedback-key",
                score=1.0 if evt._data['liked'] == True else 0,
                comment=str(evt.value)
            )
        except Exception as e:
            gr.Warning(f"Couldn't capture a feedback: {e}")

    #Set initial state of apm, llm and capture user-ip
    async def ea4all_agent_init(request: gr.Request):

        #capture user IP address
        #ea4all_user = e4u.get_user_identification(request)
        gr.Info("Thank you for connecting! I'd love to hear your feedback! Thumbs up or Thumbs down. LinkedIn comment.")

        # Set initial landscape vectorstore
        await indexer_graph.ainvoke(input={"docs": []}, config=config)

        #set chatbot description w/ user apm columns
        df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
        columns_string = ', '.join(df.columns)
        apm_columns = agentic_qna_desc + columns_string

        prompt = ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')

        page_list = ea4all_confluence()

        #Load gradio.dataframe with Portfolio sample dataset
        pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")

        return (
            apm_columns,
            [prompt],
            [prompt],
            [prompt],
            gr.Dropdown(choices=page_list['title'].values.tolist()),
            gr.DataFrame(value=page_list),
            gr.DataFrame(value=df),
            gr.DataFrame(value=pmo_df),
        )

    #authentication
    def ea4all_login(username, password):
        return (username == password)

    #TABS & Reference Architecture look-and-feel control
    def off_dbrtext():
        return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

    def on_dbrtext(file):
        if file:
            return gr.TextArea(visible=True)
        return gr.TextArea(visible=False)

    #Upload & clear business requirement
    def load_dbr(file):
        return file.decode()

    def unload_dbr():
        return gr.TextArea(visible=False)

    #Podcast upload progress
    podcast.change(show_progress='full')

    #Togaf upload file
    dbr_file.clear(unload_dbr, outputs=dbr_text)
    dbr_file.change(on_dbrtext, inputs=dbr_file, outputs=dbr_text)
    dbr_file.upload(load_dbr, inputs=dbr_file, outputs=dbr_text)
    dbr_cls.click(off_dbrtext, outputs=[dbr_text, tabs_togaf, tab_diagram])

    #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
    qna_prompt.submit(run_qna_agentic_system, [qna_prompt, ea4all_chatbot], ea4all_chatbot)
    qna_prompt.submit(lambda: "", None, [qna_prompt])
    ea4all_chatbot.like(fn=get_user_feedback)
    qna_examples.input(lambda value: value, qna_examples, qna_prompt)

    #Execute Reference Architecture
    dbr_run.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[dbr_text], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])

    #vqa_chatbot (ChatInterface -> Chatbot)
    def add_message(message, history):
        if message["text"] is not None:
            history.append({"role": "user", "content": message["text"]})

        if len(message['files']) > 0:
            history.append({"role": "user", "content": {"path": message['files'][-1]}})

        return (
            gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
            history
        )

    chat_msg = vqa_prompt.submit(add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa])
    bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="bot_response")

    ea4all_vqa.like(fn=get_user_feedback)
    vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)

    #Invoke CrewAI PMO Agentic System
    pmo_prompt.submit(run_pmo_agentic_system, [pmo_prompt, pmo_chatbot], pmo_chatbot)
    pmo_prompt.submit(lambda: "", None, [pmo_prompt])
    pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)

    #Set initial state of apm and llm
    ea4all_agentUI.load(ea4all_agent_init, outputs=[ea4all_agent_metadata, ea4all_chatbot, ea4all_vqa, pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df])
ea4all/ea4all_mcp.py
ADDED
@@ -0,0 +1,28 @@
import gradio as gr

def letter_counter(word, letter):
    """
    Count the number of occurrences of a letter in a word or text.

    Args:
        word (str): The input text to search through
        letter (str): The letter to search for

    Returns:
        int: The number of times the letter appears in the text
    """
    word = word.lower()
    letter = letter.lower()
    count = word.count(letter)
    return count

demo = gr.Interface(
    fn=letter_counter,
    inputs=["textbox", "textbox"],
    outputs="number",
    title="Letter Counter",
    description="Enter text and a letter to count how many times the letter appears in the text."
)

if __name__ == "__main__":
    demo.launch(mcp_server=True)
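For a quick check that the demo above is reachable, the Gradio client can call it over the regular HTTP API as well; the URL and `api_name` below follow Gradio's defaults for a single-function `gr.Interface` and are assumptions rather than something this file configures:

```python
# Sketch: call the running Letter Counter demo through gradio_client.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # default local address when demo.launch() is used
result = client.predict("strawberry", "r", api_name="/predict")  # "/predict" is gr.Interface's default endpoint
print(result)  # expected: 3
```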
ea4all/ea4all_store/APM-ea4all (test-split).xlsx
ADDED
Binary file (16.4 kB)
ea4all/ea4all_store/apm_qna_mock.txt
ADDED
@@ -0,0 +1,4 @@
What are the simplification opportunities within the collaboration landscape?
Who can I talk to about innovation?
What applications support marketing domain?
How can Cloud Assessment Framework increase cloud-based landscape benefits?
ea4all/ea4all_store/dbr.txt
ADDED
@@ -0,0 +1,32 @@
Purpose of this document

The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.

The requirements and solution will be agreed with the project sponsor (s) through formal review and sign off of this document.
Once signed off it will provide input to the subsequent design and development phases of the project.

Context
(These areas can be taken from the Brief/ PID as appropriate)
Background
• Change in external scenario - more integrated supervisory organs;

Why we need this project
To make it possible to control the schedule of work on employees that are legally required to.

Expected Business Outcome / Objective (Goal)
To implement the Electronic Timecard in all company business units to the public that are subject to the schedule of work and by that, reduce the number and impact of worktime related lawsuits

Project Objectives
Be compliance with current regulation regarding Timestamp with all employees with work schedule.

Ref,Feature,Description,MoSCoW
A,Input,Registration of ins/outs of employees at the system,M
G,New Worktime,Creation of new Time schedules for employees,M

Actor Catalogue

Name, Description,Goals
Employees,Employee of company under time control,To register ins and outs
Manager,Employees first manager,To approve JMLs and monthly activities regarding Time management of employees
HRSS,Key users of Shared Services of Human Resources,To manage the back end of time system
ea4all/ea4all_store/ea4all_overview.txt
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
## Background
|
2 |
+
|
3 |
+
- `Trigger`: How disruptive may Generative AI be for Enterprise Architecture Capability (People, Process and Tools)?
|
4 |
+
- `Motivation`: Master GenAI while disrupting Enterprise Architecture to empower individuals and organisations with ability to harness EA value and make people lives better, safer and more efficient.
|
5 |
+
- `Ability`: Exploit my carrer background and skillset across system development, business accumen, innovation and architecture to accelerate GenAI exploration while learning new things.
|
6 |
+
|
7 |
+
> That's how the `EA4ALL-Agentic system` was born and ever since continuously evolving to build an ecosystem of **Architects Agent partners**.
|
8 |
+
|
9 |
+
## Benefits
|
10 |
+
|
11 |
+
- `Empower individuals with Knowledge`: understand and talk about Business and Technology strategy, IT landscape, Architectue Artefacts in a single click of button.
|
12 |
+
- `Increase efficiency and productivity`: generate a documented architecture with diagram, model and descriptions. Accelerate Business Requirement identification and translation to Target Reference Architecture. Automated steps and reduced times for task execution.
|
13 |
+
- `Improve agility`: plan, execute, review and iterate over EA inputs and outputs. Increase the ability to adapt, transform and execute at pace and scale in response to changes in strategy, threats and opportunities.
|
14 |
+
- `Increase collaboration`: democratise architecture work and knowledge with anyone using natural language.
|
15 |
+
- `Cost optimisation`: intelligent allocation of architects time for valuable business tasks.
|
16 |
+
- `Business Growth`: create / re-use of (new) products and services, and people experience enhancements.
|
17 |
+
- `Resilience`: assess solution are secured by design, poses any risk and how to mitigate, apply best-practices.
|
18 |
+
|
19 |
+
|
20 |
+
## Knowledge context
|
21 |
+
|
22 |
+
Synthetic datasets are used to exemplify the Agentic System capabilities.
|
23 |
+
|
24 |
+
### IT Landscape Question and Answering
|
25 |
+
|
26 |
+
- Application name
|
27 |
+
- Business fit: appropriate, inadequate, perfect
|
28 |
+
- Technical fit: adequate, insufficient, perfect
|
29 |
+
- Business_criticality: operational, medium, high, critical
|
30 |
+
- Roadmap: maintain, invest, divers
|
31 |
+
- Architect responsible
|
32 |
+
- Hosting: user device, on-premise, IaaS, SaaS
|
33 |
+
- Business capability
|
34 |
+
- Business domain
|
35 |
+
- Description
|
36 |
+
|
37 |
+
- Bring Your Own Data: upload your own IT landscape data
|
38 |
+
- Application Portfolio Management
|
39 |
+
- xlsx tabular format
|
40 |
+
- first row (header) with fields name (colums)
|
41 |
+
|
42 |
+
### Architecture Diagram Visual Question and Answering
|
43 |
+
|
44 |
+
- Architecture Visual Artefacts
|
45 |
+
- jpeg, png
|
46 |
+
|
47 |
+
**Disclaimer**
|
48 |
+
- Your data & image are not accessible or shared with anyone else nor used for training purpose.
|
49 |
+
- EA4ALL-VQA Agent should be used ONLY FOR Architecture Diagram images.
|
50 |
+
- This feature should NOT BE USED to process inappropriate content.
|
51 |
+
|
52 |
+
### Reference Architecture Generation
|
53 |
+
|
54 |
+
- Clock in/out Use-case
|
55 |
+
|
56 |
+
## Log / Traceability
|
57 |
+
|
58 |
+
For purpose of continuous improvement, agentic workflows are logged in.
|
59 |
+
|
60 |
+
## Architecture
|
61 |
+
|
62 |
+
<italic>Core architecture built upon python, langchain, meta-faiss, gradio and Openai.<italic>
|
63 |
+
|
64 |
+
- Python
|
65 |
+
- Pandas
|
66 |
+
- Langchain
|
67 |
+
- Langsmith
|
68 |
+
- Langgraph
|
69 |
+
- Huggingface
|
70 |
+
|
71 |
+
- RAG (Retrieval Augmented Generation)
|
72 |
+
- Vectorstore
|
73 |
+
|
74 |
+
- Prompt Engineering
|
75 |
+
- Strategy & tactics: Task / Sub-tasks
|
76 |
+
- Agentic Workflow
|
77 |
+
|
78 |
+
- Models:
|
79 |
+
- OpenAI
|
80 |
+
- Llama
|
81 |
+
|
82 |
+
- Hierarchical-Agent-Teams:
|
83 |
+
- Tabular-question-answering over your own document
|
84 |
+
- Supervisor
|
85 |
+
- Visual Questions Answering
|
86 |
+
- Diagram Component Analysis
|
87 |
+
- Risk & Vulnerability and Mitigation options
|
88 |
+
- Well-Architected Design Assessment
|
89 |
+
- Vision and Target Architecture
|
90 |
+
|
91 |
+
- User Interface
|
92 |
+
- Gradio
|
93 |
+
|
94 |
+
- Hosting: Huggingface Space
|
95 |
+
|
96 |
+
## Agentic System Architecture
|
ea4all/ea4all_store/reference_architecture_dbr_assistant.txt
ADDED
@@ -0,0 +1,9 @@
1 |
+
Purpose of this document
|
2 |
+
|
3 |
+
The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.
|
4 |
+
|
5 |
+
The requirements and solution will be agreed with the project sponsor(s) through formal review and sign-off of this document.
|
6 |
+
Once signed off it will provide input to the subsequent design and development phases of the project.
|
7 |
+
|
8 |
+
Why we need this project
|
9 |
+
I want an assistant to take notes during a workshop and translate that into a pseudo process and generate a visual representation that I can then refine in a focused session.
|
ea4all/ea4all_store/reference_architecture_dbr_demo.txt
ADDED
@@ -0,0 +1,43 @@
1 |
+
Purpose of this document
|
2 |
+
|
3 |
+
The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.
|
4 |
+
|
5 |
+
The requirements and solution will be agreed with the project sponsor(s) through formal review and sign-off of this document.
|
6 |
+
Once signed off it will provide input to the subsequent design and development phases of the project.
|
7 |
+
|
8 |
+
Context
|
9 |
+
(These areas can be taken from the Brief/ PID as appropriate)
|
10 |
+
Background
|
11 |
+
• Change in the external scenario - more integrated supervisory bodies;
|
12 |
+
• The validity of the adopted exception model is being questioned (number of inquiries)
|
13 |
+
• The average value of labor lawsuits is very high (number of lawsuits)
|
14 |
+
|
15 |
+
Why we need this project
|
16 |
+
To make it possible to control the work schedule of employees who are legally required to have one.
|
17 |
+
|
18 |
+
Expected Business Outcome / Objective (Goal)
|
19 |
+
To implement the Electronic Timecard in all company business units for the employees who are subject to a work schedule and, by that, reduce the number and impact of worktime-related lawsuits
|
20 |
+
|
21 |
+
Project Objectives
|
22 |
+
Be compliant with current regulations regarding timekeeping for all employees with a work schedule.
|
23 |
+
|
24 |
+
Ref,Feature,Description,MoSCoW
|
25 |
+
A,Input,Registration of ins/outs of employees at the system,M
|
26 |
+
B,Joiner,Registration of new employees considering the new system,M
|
27 |
+
C,Workplace Change,Changes of a workplace of a given employee,M
|
28 |
+
D,Employee time,Change from subject to worktime to not subject or vice versa,M
|
29 |
+
E,New Equipment,New equipment installation on facilities,M
|
30 |
+
F,Calendar change,Change of holidays of a given workplace,M
|
31 |
+
G,New Worktime,Creation of new Time schedules for employees,M
|
32 |
+
H,New balance rule,Creation of new Time balance rules for employees,M
|
33 |
+
|
34 |
+
|
35 |
+
Actor Catalogue
|
36 |
+
|
37 |
+
Name, Description,Goals
|
38 |
+
Employees,Employee of company under time control,To register ins and outs
|
39 |
+
Coordinator,Immediate superior of non-computer user employee,To register daily activities regarding Time management of non-computer user employees subject to them
|
40 |
+
Immediate superior,Immediate superior of employee,To approve daily activities regarding Time management of employees
|
41 |
+
Manager,Employees first manager,To approve JMLs and monthly activities regarding Time management of employees
|
42 |
+
Local Medical Service,Business unit Doctor,To include absences regarding sick leaves
|
43 |
+
HRSS,Key users of Shared Services of Human Resources,To manage the back end of time system
|
ea4all/ea4all_store/strategic_principles.txt
ADDED
@@ -0,0 +1,40 @@
1 |
+
# Strategic Principles
|
2 |
+
architecture_principles = """
|
3 |
+
| Architecture Principle | Description |
|
4 |
+
| --- | --- |
|
5 |
+
| **Business Continuity** | The architecture must ensure that critical business functions can continue to operate during and after a disaster or unexpected downtime. |
|
6 |
+
| **Interoperability** | Systems and data must be able to interact with each other, both within and across organizational boundaries. |
|
7 |
+
| **Modularity** | The architecture should be composed of modular components that can be independently updated or replaced. |
|
8 |
+
| **Scalability** | The architecture should be designed to handle increasing amounts of work in a graceful manner. |
|
9 |
+
| **Secure by Design** | The architecture must protect information and systems from unauthorized access and provide confidentiality, integrity, and availability. |
|
10 |
+
| **Simplicity** | The architecture should be as simple as possible, while still meeting business needs. Avoid unnecessary complexity. |
|
11 |
+
| **Standardization** | Use industry standards where they exist and are appropriate for the business. |
|
12 |
+
| **Sustainability** | The architecture should be sustainable and consider the environmental impact of IT decisions. |
|
13 |
+
| **User-Centric** | The architecture should focus on the user experience, and be designed with the needs and behaviors of the user in mind. |
|
14 |
+
"""
|
15 |
+
|
16 |
+
business_principles = """
|
17 |
+
| Business Principle | Description |
|
18 |
+
| --- | --- |
|
19 |
+
| **Customer Focus** | The interests of the customer must be at the center of all decisions and operations. |
|
20 |
+
| **Value Creation** | Every initiative and operation should aim to create value for the customers and the business. |
|
21 |
+
| **Continuous Improvement** | The business should always strive for better ways to deliver value, through innovation and improvement. |
|
22 |
+
| **Integrity** | The business should operate in an ethical and transparent manner. |
|
23 |
+
| **Collaboration** | Working together across teams and departments is essential for delivering value. |
|
24 |
+
| **Agility** | The business should be able to quickly respond to changes in the market or environment. |
|
25 |
+
| **Sustainability** | Decisions should consider their long-term impact on the environment and society. |
|
26 |
+
| **Accountability** | Every team and individual in the business should take responsibility for their actions and decisions. |
|
27 |
+
| **Data-Driven Decision Making** | Decisions should be based on data and factual information. |
|
28 |
+
"""
|
29 |
+
|
30 |
+
technology_principles = """
|
31 |
+
| Technology Principle | Description |
|
32 |
+
| --- | --- |
|
33 |
+
| **Reliability** | Systems should be dependable and perform consistently under all conditions. |
|
34 |
+
| **Maintainability** | Technology should be easy to update and improve over time. |
|
35 |
+
| **Efficiency** | Systems and processes should be designed to minimize waste and maximize productivity. |
|
36 |
+
| **User-Centric Design** | Technology should be designed with the end user in mind, ensuring it is easy to use and meets user needs. |
|
37 |
+
| **Data Integrity** | Ensuring the accuracy and consistency of data over its entire lifecycle. |
|
38 |
+
| **Sustainability** | Technology decisions should consider their impact on the environment. |
|
39 |
+
| **Innovation** | Embracing new technologies and ideas to stay competitive and meet evolving business needs. |
|
40 |
+
"""
|
ea4all/main.py
ADDED
@@ -0,0 +1,6 @@
1 |
+
##version 2025-06-04
|
2 |
+
#ea4all-gradio-agent-mcp-entry-point
|
3 |
+
from ea4all.__main__ import main
|
4 |
+
|
5 |
+
if __name__ == '__main__':
|
6 |
+
main()
|
ea4all/packages.txt
ADDED
File without changes
|
ea4all/src/__init__.py
ADDED
@@ -0,0 +1,4 @@
1 |
+
"""Shared utilities module."""
|
2 |
+
|
3 |
+
#from ea4all.src.graph import super_graph
|
4 |
+
#__all__ = ["super_graph"]
|
ea4all/src/ea4all_apm/configuration.py
ADDED
@@ -0,0 +1,35 @@
1 |
+
"""Define the configurable parameters for the APM agent."""
|
2 |
+
|
3 |
+
from __future__ import annotations
|
4 |
+
|
5 |
+
from dataclasses import dataclass, field
|
6 |
+
from typing import Annotated, Literal
|
7 |
+
|
8 |
+
import ea4all.src.ea4all_apm.prompts as prompts
|
9 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
10 |
+
|
11 |
+
@dataclass(kw_only=True)
|
12 |
+
class AgentConfiguration(BaseConfiguration):
|
13 |
+
"""The configuration for the agent."""
|
14 |
+
|
15 |
+
# prompts
|
16 |
+
router_system_prompt: str = field(
|
17 |
+
default=prompts.ROUTER_SYSTEM_PROMPT,
|
18 |
+
metadata={
|
19 |
+
"description": "The system prompt used for classifying user questions to route them to the correct node."
|
20 |
+
},
|
21 |
+
)
|
22 |
+
|
23 |
+
query_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
24 |
+
default="meta-llama/Llama-3.3-70B-Instruct",
|
25 |
+
metadata={
|
26 |
+
"description": "The language model used for processing and refining queries. Should be in the form: provider/model-name."
|
27 |
+
},
|
28 |
+
)
|
29 |
+
|
30 |
+
response_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
31 |
+
default="meta-llama/Llama-3.3-70B-Instruct",
|
32 |
+
metadata={
|
33 |
+
"description": "The language model used for generating responses. Should be in the form: provider/model-name."
|
34 |
+
},
|
35 |
+
)
|
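# Usage sketch (illustrative only, not part of the original module): values can be overridden per
# run through the standard "configurable" mapping of a RunnableConfig, e.g.
#   config = {"configurable": {"query_model": "meta-llama/Llama-3.3-70B-Instruct"}}
#   agent_config = AgentConfiguration.from_runnable_config(config)
# The field name is real; the model string here is just an example value.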
ea4all/src/ea4all_apm/graph.py
ADDED
@@ -0,0 +1,931 @@
1 |
+
"""Main entrypoint for the conversational APM graph.
|
2 |
+
|
3 |
+
This module defines the core structure and functionality of the conversational
|
4 |
+
APM graph. It includes the main graph definition, state management,
|
5 |
+
and key functions for processing & routing user queries and generating answers to
|
6 |
+
Enterprise Architecture related user questions
|
7 |
+
about an IT Landscape, using the vectorstore or web search.
|
8 |
+
"""
|
9 |
+
import json
|
10 |
+
import tempfile
|
11 |
+
import os
|
12 |
+
|
13 |
+
from langgraph.graph import END, StateGraph
|
14 |
+
|
15 |
+
#core libraries
|
16 |
+
from langchain_core.runnables import RunnableConfig
|
17 |
+
from langchain_core.prompts.chat import ChatPromptTemplate
|
18 |
+
from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
|
19 |
+
from langchain_core.prompts import ChatPromptTemplate
|
20 |
+
from langchain_core.output_parsers.json import JsonOutputParser
|
21 |
+
from langchain_core.output_parsers import StrOutputParser
|
22 |
+
from langchain_core.runnables.history import RunnableLambda
|
23 |
+
from langchain_core.runnables import RunnablePassthrough, RunnableConfig
|
24 |
+
from langchain_core.runnables import RunnableGenerator
|
25 |
+
from langchain_core.documents import Document
|
26 |
+
|
27 |
+
from langchain.load import dumps, loads
|
28 |
+
from langchain.hub import pull
|
29 |
+
|
30 |
+
##Utils and tools
|
31 |
+
from langchain_community.document_loaders import JSONLoader
|
32 |
+
from langchain_community.utilities import BingSearchAPIWrapper
|
33 |
+
from langchain_community.tools.bing_search.tool import BingSearchResults
|
34 |
+
|
35 |
+
from operator import itemgetter
|
36 |
+
|
37 |
+
#compute amount of tokens used
|
38 |
+
import tiktoken
|
39 |
+
|
40 |
+
#import APMGraph packages
|
41 |
+
from ea4all.src.ea4all_apm.configuration import AgentConfiguration
|
42 |
+
from ea4all.src.ea4all_apm.state import APMState, InputState
|
43 |
+
import ea4all.src.ea4all_apm.prompts as e4p
|
44 |
+
from ea4all.src.shared.utils import (
|
45 |
+
load_mock_content,
|
46 |
+
get_llm_client,
|
47 |
+
get_history_gradio,
|
48 |
+
extract_structured_output,
|
49 |
+
extract_topic_from_business_input,
|
50 |
+
_join_paths,
|
51 |
+
)
|
52 |
+
from ea4all.src.shared import vectorstore
|
53 |
+
|
54 |
+
# This file contains sample APM QUESTIONS
|
55 |
+
APM_MOCK_QNA = "apm_qna_mock.txt"
|
56 |
+
|
57 |
+
async def retrieve_documents(
|
58 |
+
state: APMState, *, config: RunnableConfig
|
59 |
+
) -> dict[str, list[Document]]:
|
60 |
+
"""Retrieve documents based on a given query.
|
61 |
+
|
62 |
+
This function uses a retriever to fetch relevant documents for a given query.
|
63 |
+
|
64 |
+
Args:
|
65 |
+
state (QueryState): The current state containing the query string.
|
66 |
+
config (RunnableConfig): Configuration with the retriever used to fetch documents.
|
67 |
+
|
68 |
+
Returns:
|
69 |
+
dict[str, list[Document]]: A dictionary with a 'documents' key containing the list of retrieved documents.
|
70 |
+
"""
|
71 |
+
with vectorstore.make_retriever(config) as retriever:
|
72 |
+
response = await retriever.ainvoke(state.question, config)
|
73 |
+
return {"documents": response}
|
74 |
+
|
75 |
+
async def apm_retriever(config: RunnableConfig):
|
76 |
+
with vectorstore.make_retriever(config) as retriever:
|
77 |
+
response = retriever
|
78 |
+
|
79 |
+
return response
|
80 |
+
|
81 |
+
# Few Shot Examples
|
82 |
+
few_shot_step_back_examples = [
|
83 |
+
{
|
84 |
+
"input": "Who can I talk to about innovation?",
|
85 |
+
"output": '{"datasource": "vectorstore, "topic":"who can I talk to"}"}',
|
86 |
+
},
|
87 |
+
{
|
88 |
+
"input": "Describe the finance landscape.",
|
89 |
+
"output": '{"datasource": "vectorstore", "topic:":"line of business landscape"}',
|
90 |
+
},
|
91 |
+
{
|
92 |
+
"input": "What applications support the marketing landscape?",
|
93 |
+
"output": '{"datasource": "vectorstore", "topic:":"line of business landscape"}',
|
94 |
+
},
|
95 |
+
{
|
96 |
+
"input": "List the simplification opportunities for the collaboration space.",
|
97 |
+
"output": '{"datasource": "vectorstore", "topic:":"line of business landscape"}',
|
98 |
+
},
|
99 |
+
{
|
100 |
+
"input": "What are the available patterns to deploy AI applications into AWS?",
|
101 |
+
"output": '{"datasource": "websearch", "topic:":"design patterns"}',
|
102 |
+
},
|
103 |
+
{
|
104 |
+
"input": "What is a Well-Architected Framework?",
|
105 |
+
"output": '{"datasource": "websearch", "topic:":"architecture framework"}',
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"input": "What is a Cloud Assessment Framework?",
|
109 |
+
"output": '{"datasource": "websearch", "topic:":"cloud assessment framework"}',
|
110 |
+
},
|
111 |
+
{
|
112 |
+
"input": "What are the main architecture frameworks?",
|
113 |
+
"output": '{"datasource": "websearch", "topic:":"architecture framework"}',
|
114 |
+
},
|
115 |
+
]
|
116 |
+
|
117 |
+
# We now transform these to example messages
|
118 |
+
few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
|
119 |
+
[
|
120 |
+
("human", "{input}"),
|
121 |
+
("ai", "{output}"),
|
122 |
+
]
|
123 |
+
)
|
124 |
+
|
125 |
+
few_shot_prompt = FewShotChatMessagePromptTemplate(
|
126 |
+
input_variables=["user_question"],
|
127 |
+
example_prompt=few_shot_step_back_examples_prompt,
|
128 |
+
examples=few_shot_step_back_examples,
|
129 |
+
)
|
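# Note (added for clarity): FewShotChatMessagePromptTemplate renders each example above as a
# human/ai message pair, so the routing prompt shows the model several worked routing decisions
# before the actual user question is appended.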
130 |
+
|
131 |
+
## RAG from scratch: Query Translations functions
|
132 |
+
def get_unique_union(documents: list[list]):
|
133 |
+
""" Unique union of retrieved docs """
|
134 |
+
# Flatten list of lists, and convert each Document to string
|
135 |
+
flattened_docs = [dumps(doc) for sublist in documents for doc in sublist]
|
136 |
+
# Get unique documents
|
137 |
+
unique_docs = list(set(flattened_docs))
|
138 |
+
# Return
|
139 |
+
return [loads(doc) for doc in unique_docs]
|
140 |
+
|
141 |
+
def reciprocal_rank_fusion(results: list[list], k=60):
|
142 |
+
""" Reciprocal_rank_fusion that takes multiple lists of ranked documents
|
143 |
+
and an optional parameter k used in the RRF formula """
|
144 |
+
|
145 |
+
# Initialize a dictionary to hold fused scores for each unique document
|
146 |
+
fused_scores = {}
|
147 |
+
|
148 |
+
# Iterate through each list of ranked documents
|
149 |
+
for docs in results:
|
150 |
+
# Iterate through each document in the list, with its rank (position in the list)
|
151 |
+
for rank, doc in enumerate(docs):
|
152 |
+
# Convert the document to a string format to use as a key (assumes documents can be serialized to JSON)
|
153 |
+
doc_str = doc.metadata['source']
|
154 |
+
# If the document is not yet in the fused_scores dictionary, add it with an initial score of 0
|
155 |
+
if doc_str not in fused_scores:
|
156 |
+
fused_scores[doc_str] = [doc,0]
|
157 |
+
# Retrieve the current score of the document, if any
|
158 |
+
#previous_score = fused_scores[doc_str]
|
159 |
+
# Update the score of the document using the RRF formula: 1 / (rank + k)
|
160 |
+
fused_scores[doc_str][1] += 1 / (rank + k)
|
161 |
+
|
162 |
+
# Sort the documents based on their fused scores in descending order to get the final reranked results
|
163 |
+
reranked_results = [
|
164 |
+
doc[0]
|
165 |
+
for source, doc in sorted(fused_scores.items(), key=lambda x: x[1][1], reverse=True)
|
166 |
+
]
|
167 |
+
|
168 |
+
# Return the reranked results as a list of tuples, each containing the document and its fused score
|
169 |
+
return reranked_results
|
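# Worked example (illustrative only): with k=60, if document A is ranked first in two lists and
# document B is ranked second in one list, the fused scores are
#   A: 1/(0+60) + 1/(0+60) = 2/60 and B: 1/(1+60) = 1/61,
# so A is returned ahead of B after fusion.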
170 |
+
|
171 |
+
def format_qa_pair(question, answer):
|
172 |
+
"""Format Q and A pair"""
|
173 |
+
|
174 |
+
formatted_string = ""
|
175 |
+
formatted_string += f"Question: {question}\nAnswer: {answer}\n\n"
|
176 |
+
return formatted_string.strip()
|
177 |
+
|
178 |
+
async def get_retrieval_chain(rag_input, ea4all_user, question, retriever, config: RunnableConfig):
|
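# rag_input selects the query-translation strategy used to build the retrieval chain:
#   1 = multi-query, 2 = RAG fusion, 3 = decomposition (answers sub-questions and returns the
#   final answer directly), 4 = step-back prompting, 5 = HyDE, anything else = plain retrieval
#   on the standalone question.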
179 |
+
|
180 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
181 |
+
llm = get_llm_client(configuration.query_model, api_base_url=configuration.api_base_url)
|
182 |
+
|
183 |
+
#retriever = retriever_faiss(db, ea4all_user)
|
184 |
+
#CHANGE: Receive as parameter originer
|
185 |
+
#retriever = await apm_retriever(config) #NEEDS retrofit to add user_login
|
186 |
+
|
187 |
+
if rag_input == 1: # Multi-query
|
188 |
+
## RAG Query Transformation: Multi query
|
189 |
+
prompt_perspectives = ChatPromptTemplate.from_template(e4p.multiquery_template)
|
190 |
+
generate_queries = (
|
191 |
+
prompt_perspectives
|
192 |
+
| llm
|
193 |
+
| StrOutputParser()
|
194 |
+
| (lambda x: x.split("\n"))
|
195 |
+
)
|
196 |
+
# Retrieve chain
|
197 |
+
retrieval_chain = generate_queries | retriever.map() | get_unique_union
|
198 |
+
|
199 |
+
elif rag_input == 2: # RAG Fusion
|
200 |
+
# Prompt
|
201 |
+
prompt_rag_fusion = ChatPromptTemplate.from_template(e4p.rag_fusion_questions_template)
|
202 |
+
generate_queries = (
|
203 |
+
prompt_rag_fusion
|
204 |
+
| llm
|
205 |
+
| StrOutputParser()
|
206 |
+
| (lambda x: x.split("\n"))
|
207 |
+
)
|
208 |
+
# Retrieval chain
|
209 |
+
retrieval_chain = generate_queries | retriever.map() | reciprocal_rank_fusion
|
210 |
+
|
211 |
+
elif rag_input == 3: # Decomposition
|
212 |
+
# Build prompt
|
213 |
+
prompt_decomposition = ChatPromptTemplate.from_template(e4p.decomposition_template)
|
214 |
+
# Chain
|
215 |
+
generate_queries_decomposition = ( prompt_decomposition | llm | StrOutputParser() | (lambda x: x.split("\n")))
|
216 |
+
|
217 |
+
# Return new set of questions
|
218 |
+
questions = generate_queries_decomposition.invoke(
|
219 |
+
{"question": question},
|
220 |
+
{"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
|
221 |
+
)
|
222 |
+
# Prompt: Answer recursively
|
223 |
+
decomposition_prompt = ChatPromptTemplate.from_template(e4p.decomposition_answer_recursevely_template)
|
224 |
+
|
225 |
+
# Answer each question and return final answer
|
226 |
+
q_a_pairs = ""
|
227 |
+
for q in questions:
|
228 |
+
rag_chain = (
|
229 |
+
{"context": itemgetter("question") | retriever,
|
230 |
+
"question": itemgetter("question"),
|
231 |
+
"q_a_pairs": itemgetter("q_a_pairs")}
|
232 |
+
| decomposition_prompt
|
233 |
+
| llm
|
234 |
+
| StrOutputParser())
|
235 |
+
|
236 |
+
answer = rag_chain.invoke(
|
237 |
+
{"question":q,"q_a_pairs":q_a_pairs},
|
238 |
+
{"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
|
239 |
+
)
|
240 |
+
|
241 |
+
q_a_pair = format_qa_pair(q,answer)
|
242 |
+
q_a_pairs = q_a_pairs + "\n---\n" + q_a_pair
|
243 |
+
|
244 |
+
return answer # Final response to user inquiry
|
245 |
+
|
246 |
+
elif rag_input == 4: # RAG Step-back
|
247 |
+
|
248 |
+
generate_queries_step_back = e4p.few_shot_step_back_prompt | llm | StrOutputParser()
|
249 |
+
|
250 |
+
generate_queries_step_back.invoke(
|
251 |
+
{"standalone_question": lambda x: x["standalone_question"]},
|
252 |
+
{"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
|
253 |
+
)
|
254 |
+
|
255 |
+
response_prompt = ChatPromptTemplate.from_template(e4p.step_back_response_prompt_template)
|
256 |
+
|
257 |
+
retrieval_chain = (
|
258 |
+
{
|
259 |
+
# Retrieve context using the normal question
|
260 |
+
"normal_context": RunnableLambda(lambda x: x["standalone_question"]) | retriever,
|
261 |
+
# Retrieve context using the step-back question
|
262 |
+
"step_back_context": generate_queries_step_back | retriever,
|
263 |
+
# Pass on the question
|
264 |
+
"standalone_question": lambda x: x["standalone_question"],
|
265 |
+
}
|
266 |
+
| response_prompt
|
267 |
+
| llm
|
268 |
+
| StrOutputParser()
|
269 |
+
)
|
270 |
+
|
271 |
+
elif rag_input == 5: # RAG HyDE
|
272 |
+
# Prompt
|
273 |
+
prompt_hyde = ChatPromptTemplate.from_template(e4p.hyde_template)
|
274 |
+
generate_docs_for_retrieval = (
|
275 |
+
prompt_hyde |
|
276 |
+
llm |
|
277 |
+
StrOutputParser()
|
278 |
+
)
|
279 |
+
|
280 |
+
retrieval_chain = generate_docs_for_retrieval | retriever
|
281 |
+
|
282 |
+
else:
|
283 |
+
# Standard RAG approach - user query
|
284 |
+
retrieval_chain = itemgetter("standalone_question") | retriever
|
285 |
+
|
286 |
+
return retrieval_chain
|
287 |
+
|
288 |
+
#Get relevant answers to user query
|
289 |
+
##get_relevant_documents "deprecated" - replaced by invoke : 2024-06-07
|
290 |
+
def get_relevant_answers(query, config: RunnableConfig):
|
291 |
+
|
292 |
+
if query != "":
|
293 |
+
#retriever.vectorstore.index.ntotal
|
294 |
+
#retriever = retriever_faiss(user_ip)
|
295 |
+
#response = retriever.invoke({"standalone_question": query})
|
296 |
+
|
297 |
+
response = retrieve_documents(query, config)
|
298 |
+
return response
|
299 |
+
else:
|
300 |
+
return []
|
301 |
+
|
302 |
+
#Return LLM answer to user inquiry
|
303 |
+
def rag_llm(llm, chat_prompt, query, response):
|
304 |
+
answers = llm.invoke(
|
305 |
+
chat_prompt.format_prompt(
|
306 |
+
cdocs=response, query=query,
|
307 |
+
)
|
308 |
+
)
|
309 |
+
|
310 |
+
try:
|
311 |
+
return answers.content
|
312 |
+
except AttributeError:
|
313 |
+
return answers
|
314 |
+
|
315 |
+
#Save user apm to disk
|
316 |
+
def ea4all_serialize(apm_file, user_ip):
|
317 |
+
import pickle
|
318 |
+
|
319 |
+
# Specify the target filename
|
320 |
+
filename = _join_paths(AgentConfiguration.ea4all_store, f"apm_{user_ip}.pkl")
|
321 |
+
|
322 |
+
# Serialize and save the binary data to a file
|
323 |
+
try:
|
324 |
+
with open(filename, 'wb') as file:
|
325 |
+
pickle.dump(apm_file, file)
|
326 |
+
return True
|
327 |
+
# Some code that might raise an exception
|
328 |
+
except Exception:
|
329 |
+
# Handle the exception
|
330 |
+
return False
|
331 |
+
|
332 |
+
#number of tokens consumed
|
333 |
+
def num_tokens_from_string(string: str, encoding_name: str) -> int:
|
334 |
+
"""Returns the number of tokens in a text string."""
|
335 |
+
encoding = tiktoken.get_encoding(encoding_name)
|
336 |
+
num_tokens = len(encoding.encode(string))
|
337 |
+
return num_tokens
|
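# Usage sketch (illustrative): count tokens with a real tiktoken encoding name, e.g.
#   num_tokens_from_string("How many applications support finance?", "cl100k_base")
# returns the integer number of tokens for that string.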
338 |
+
|
339 |
+
#retrieve relevant questions based on user interaction
|
340 |
+
def get_relevant_questions():
|
341 |
+
relevant_questions = []
|
342 |
+
mock = load_mock_content(APM_MOCK_QNA)
|
343 |
+
for line in mock.splitlines(): relevant_questions += [line]
|
344 |
+
|
345 |
+
return relevant_questions
|
346 |
+
|
347 |
+
#Rephrase the original user question based on the system prompt to lead to a better LLM answer
|
348 |
+
def user_query_rephrasing(
|
349 |
+
state: APMState, _prompt=None, *, config: RunnableConfig
|
350 |
+
) -> dict[str,str]:
|
351 |
+
|
352 |
+
question = getattr(state,'question')
|
353 |
+
|
354 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
355 |
+
# 'model = load_chat_model(configuration.query_model)
|
356 |
+
model = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
357 |
+
|
358 |
+
if _prompt:
|
359 |
+
rewrite_prompt = pull("learn-it-all-do-it-all/ea4all_user_question_rephrase")
|
360 |
+
inputs = {"user_question": question} #, "ai_output": e4p.LLAMA31_PROMPT_FORMAT}
|
361 |
+
else:
|
362 |
+
rewrite_prompt = pull("learn-it-all-do-it-all/ea4all_question_rewriter")
|
363 |
+
inputs = {"user_question": question, "target":"web search"}
|
364 |
+
|
365 |
+
rewrite_chain = rewrite_prompt | model | JsonOutputParser()
|
366 |
+
|
367 |
+
result = rewrite_chain.invoke(
|
368 |
+
input=inputs
|
369 |
+
)
|
370 |
+
|
371 |
+
try:
|
372 |
+
question = result['rephrased']
|
373 |
+
except Exception:
|
374 |
+
question = state.question
|
375 |
+
|
376 |
+
return {"question": question}
|
377 |
+
|
378 |
+
# Post-processing
|
379 |
+
def format_docs(docs):
|
380 |
+
return "\n".join(doc.page_content for doc in docs)
|
381 |
+
|
382 |
+
def identify_task_category(
|
383 |
+
question,chat_memory,config: RunnableConfig
|
384 |
+
):
|
385 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
386 |
+
|
387 |
+
prompt = pull("learn-it-all-do-it-all/apm_task_router")
|
388 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
389 |
+
|
390 |
+
try:
|
391 |
+
# x=get_history_gradio(x) extract Human / AI
|
392 |
+
# fake gradio chat memory
|
393 |
+
x={"chat_memory":[]}
|
394 |
+
x['chat_memory'] = chat_memory
|
395 |
+
# extract human message only
|
396 |
+
memory=""
|
397 |
+
for human, ai in x['chat_memory']: memory += human + ";"
|
398 |
+
|
399 |
+
chain_one = prompt | llm | JsonOutputParser()
|
400 |
+
result = chain_one.invoke({"user_question": memory + question if x else question})
|
401 |
+
|
402 |
+
#parse response and pass on to next chain2/prompt2
|
403 |
+
response = extract_topic_from_business_input(result)
|
404 |
+
|
405 |
+
return response
|
406 |
+
except Exception:
|
407 |
+
return {'primary': 'General Inquiry'}
|
408 |
+
|
409 |
+
def retrieval_grader(model):
|
410 |
+
prompt = PromptTemplate(
|
411 |
+
template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance
|
412 |
+
of a retrieved document to a user question. If the document contains keywords related to the user question,
|
413 |
+
grade it as relevant. It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
|
414 |
+
Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question. \n
|
415 |
+
Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.
|
416 |
+
<|eot_id|><|start_header_id|>user<|end_header_id|>
|
417 |
+
Here is the retrieved document: \n\n {document} \n\n
|
418 |
+
Here is the user question: {user_question} \n <|eot_id|><|start_header_id|>assistant<|end_header_id|>
|
419 |
+
""",
|
420 |
+
input_variables=["user_question", "document"],
|
421 |
+
)
|
422 |
+
|
423 |
+
retrieval_grader = prompt | model | JsonOutputParser()
|
424 |
+
|
425 |
+
return retrieval_grader
|
426 |
+
|
427 |
+
def hallucination_grader(model):
|
428 |
+
# Prompt
|
429 |
+
prompt = pull("learn-it-all-do-it-all/ea4all_apm_hallucination_grader")
|
430 |
+
hallucination_grader = prompt | model | JsonOutputParser()
|
431 |
+
|
432 |
+
return hallucination_grader
|
433 |
+
|
434 |
+
def grade_answer(model):
|
435 |
+
# Prompt
|
436 |
+
prompt = PromptTemplate(
|
437 |
+
template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assistant and your task is to assess the answer relevance to address a user question.\n
|
438 |
+
Give a binary score 'yes' to indicate that the answer is relevant or 'no' otherwise.\n
|
439 |
+
Provide the binary score as a JSON with a single key 'score' and nothing else.\n
|
440 |
+
<|eot_id|><|start_header_id|>user<|end_header_id|> Here is the answer:
|
441 |
+
\n ------- \n
|
442 |
+
{generation}
|
443 |
+
\n ------- \n
|
444 |
+
Here is the question: {user_question} <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
|
445 |
+
input_variables=["generation", "user_question"],
|
446 |
+
)
|
447 |
+
|
448 |
+
answer_grader = prompt | model | JsonOutputParser()
|
449 |
+
|
450 |
+
return answer_grader
|
451 |
+
|
452 |
+
async def grade_documents(state, config: RunnableConfig):
|
453 |
+
"""
|
454 |
+
Determines whether the retrieved documents are relevant to the question
|
455 |
+
If any document is not relevant, we will set a flag to run web search
|
456 |
+
|
457 |
+
Args:
|
458 |
+
state (dict): The current graph state
|
459 |
+
|
460 |
+
Returns:
|
461 |
+
state (dict): Filtered out irrelevant documents and updated web_search state
|
462 |
+
"""
|
463 |
+
|
464 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
465 |
+
|
466 |
+
print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
|
467 |
+
question = state.question
|
468 |
+
documents = state.documents
|
469 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
470 |
+
|
471 |
+
# Score each doc
|
472 |
+
filtered_docs = []
|
473 |
+
web_search = "No"
|
474 |
+
for d in documents:
|
475 |
+
score = await retrieval_grader(llm).ainvoke(
|
476 |
+
{"user_question": question, "document": d.page_content}
|
477 |
+
)
|
478 |
+
grade = score["score"]
|
479 |
+
# Document relevant
|
480 |
+
if grade.lower() == "yes":
|
481 |
+
print("---GRADE: DOCUMENT RELEVANT---")
|
482 |
+
filtered_docs.append(d)
|
483 |
+
# Document not relevant
|
484 |
+
else:
|
485 |
+
print("---GRADE: DOCUMENT NOT RELEVANT---")
|
486 |
+
# We do not include the document in filtered_docs
|
487 |
+
# We set a flag to indicate that we want to run web search
|
488 |
+
web_search = "Yes"
|
489 |
+
|
490 |
+
return {"documents": filtered_docs, "question": question, "web_search": web_search}
|
491 |
+
|
492 |
+
def decide_to_generate(state):
|
493 |
+
"""
|
494 |
+
Determines whether to generate an answer, or add web search
|
495 |
+
|
496 |
+
Args:
|
497 |
+
state (dict): The current graph state
|
498 |
+
|
499 |
+
Returns:
|
500 |
+
str: Binary decision for next node to call
|
501 |
+
"""
|
502 |
+
|
503 |
+
print("---ASSESS GRADED DOCUMENTS---")
|
504 |
+
state.question
|
505 |
+
web_search = state.web_search
|
506 |
+
getattr(state,'documents')
|
507 |
+
|
508 |
+
if web_search == "Yes":
|
509 |
+
# All documents have been filtered check_relevance
|
510 |
+
# We will re-generate a new query
|
511 |
+
print(
|
512 |
+
"---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, INCLUDE WEB SEARCH---"
|
513 |
+
)
|
514 |
+
return "websearch"
|
515 |
+
else:
|
516 |
+
# We have relevant documents, so generate answer
|
517 |
+
print("---DECISION: GENERATE---")
|
518 |
+
return "generate"
|
519 |
+
|
520 |
+
def grade_generation_v_documents_and_question(
|
521 |
+
state:APMState, config: RunnableConfig):
|
522 |
+
"""
|
523 |
+
Determines whether the generation is grounded in the document and answers question.
|
524 |
+
|
525 |
+
Args:
|
526 |
+
state (dict): The current graph state
|
527 |
+
|
528 |
+
Returns:
|
529 |
+
str: Decision for next node to call
|
530 |
+
"""
|
531 |
+
|
532 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
533 |
+
|
534 |
+
question = getattr(state,'question')
|
535 |
+
documents = getattr(state,'documents')
|
536 |
+
generation = getattr(state,'generation')
|
537 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
538 |
+
|
539 |
+
if getattr(state,'web_search') == "Yes":
|
540 |
+
#print("---CHECK HALLUCINATIONS---")
|
541 |
+
hallucination_grader_instance = hallucination_grader(llm)
|
542 |
+
#for output in hallucination_grader_instance.stream(
|
543 |
+
output = hallucination_grader_instance.invoke(
|
544 |
+
{"documents": documents, "generation": generation},
|
545 |
+
config={"tags":["stream_hallucination"]})
|
546 |
+
#yield(output)
|
547 |
+
grade = output["score"]
|
548 |
+
print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---") if grade=="yes" else exit
|
549 |
+
else:
|
550 |
+
grade = 'yes'
|
551 |
+
|
552 |
+
# Check hallucination
|
553 |
+
if grade == "yes":
|
554 |
+
#Check question-answering
|
555 |
+
print("---GRADE GENERATION vs QUESTION---")
|
556 |
+
grade_answer_instance = grade_answer(llm)
|
557 |
+
#for output in grade_answer_instance.stream(
|
558 |
+
output = grade_answer_instance.invoke(
|
559 |
+
{"user_question": question, "generation": generation},
|
560 |
+
config={"tags":["stream_grade_answer"]})
|
561 |
+
#yield(output)
|
562 |
+
grade = output["score"]
|
563 |
+
if grade == "yes":
|
564 |
+
print("---DECISION: GENERATION ADDRESSES QUESTION---")
|
565 |
+
yield "useful"
|
566 |
+
else:
|
567 |
+
print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
|
568 |
+
yield "not useful"
|
569 |
+
else:
|
570 |
+
print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
|
571 |
+
yield "not supported"
|
572 |
+
|
573 |
+
async def apm_query_router(
|
574 |
+
state: APMState, config: RunnableConfig
|
575 |
+
) -> str:
|
576 |
+
|
577 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
578 |
+
|
579 |
+
routing_prompt = pull('learn-it-all-do-it-all/ea4all-apm-user-question-routing')
|
580 |
+
|
581 |
+
#update prompt with few-shot-examples
|
582 |
+
updated_prompt = routing_prompt.from_messages([routing_prompt.messages[0], few_shot_prompt, routing_prompt.messages[1], routing_prompt.messages[2]])
|
583 |
+
# Apply partial variables to the created template
|
584 |
+
updated_prompt = updated_prompt.partial(
|
585 |
+
metadata=e4p.TEMPLATE_APM_QNA_ROUTING,
|
586 |
+
)
|
587 |
+
|
588 |
+
model = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
589 |
+
|
590 |
+
route = updated_prompt | model
|
591 |
+
|
592 |
+
##Rephrase user question to lead to a better LLM response
|
593 |
+
#PROMPT as context NOT WORKING AS EXPECTED 2024-09-23
|
594 |
+
user_query = user_query_rephrasing(state=state, _prompt=updated_prompt, config=config)['question']
|
595 |
+
|
596 |
+
response = await route.ainvoke({"user_question": user_query})
|
597 |
+
|
598 |
+
datasource = extract_structured_output(response.content)['datasource']
|
599 |
+
|
600 |
+
return datasource
|
601 |
+
|
602 |
+
async def retrieve(
|
603 |
+
state: APMState, config: RunnableConfig
|
604 |
+
):
|
605 |
+
"""
|
606 |
+
Retrieve documents
|
607 |
+
|
608 |
+
Args:
|
609 |
+
state (dict): The current graph state
|
610 |
+
|
611 |
+
Returns:
|
612 |
+
state (dict): New key added to state, documents, that contains retrieved documents
|
613 |
+
"""
|
614 |
+
|
615 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
616 |
+
|
617 |
+
#print("---RETRIEVE---")
|
618 |
+
question = getattr(state,'question')
|
619 |
+
|
620 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
621 |
+
|
622 |
+
with vectorstore.make_retriever(config) as _retriever:
|
623 |
+
retriever = _retriever
|
624 |
+
|
625 |
+
# First we add a step to load memory from gr.ChatInterface.history_chat
|
626 |
+
# This adds a "memory" key to the input object
|
627 |
+
loaded_memory = RunnablePassthrough.assign(
|
628 |
+
chat_history = RunnableLambda(get_history_gradio) | itemgetter("history"))
|
629 |
+
|
630 |
+
# Now we calculate the standalone question <= Original Question + ChatHistory
|
631 |
+
standalone_question = {
|
632 |
+
"standalone_question": {
|
633 |
+
"chat_history": lambda x: str(x["chat_history"]),
|
634 |
+
"user_question": lambda x: x['user_question']
|
635 |
+
}
|
636 |
+
| e4p.CONDENSE_QUESTION_PROMPT
|
637 |
+
| llm
|
638 |
+
| StrOutputParser()
|
639 |
+
}
|
640 |
+
|
641 |
+
# Retrieval
|
642 |
+
rag_input = int(getattr(state,'rag'))
|
643 |
+
retrieval_chain = await get_retrieval_chain(rag_input,"ea4all_agent",question,retriever, config=config)
|
644 |
+
|
645 |
+
retrieved_documents = {
|
646 |
+
"cdocs": retrieval_chain,
|
647 |
+
"user_question": itemgetter("standalone_question")
|
648 |
+
}
|
649 |
+
|
650 |
+
# And now we put it all together!
|
651 |
+
final_chain = loaded_memory | standalone_question | retrieved_documents
|
652 |
+
|
653 |
+
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
|
654 |
+
|
655 |
+
return {"documents": format_docs(documents['cdocs']), "question": question, "rag":getattr(state,'rag')}
|
656 |
+
|
657 |
+
async def websearch(
|
658 |
+
state: APMState, config: RunnableConfig
|
659 |
+
) -> dict[str,any]:
|
660 |
+
"""
|
661 |
+
Web search based on the re-phrased question.
|
662 |
+
|
663 |
+
Args:
|
664 |
+
state (dict): The current graph state
|
665 |
+
config (RunnableConfig): Configuration with the model used for query analysis.
|
666 |
+
|
667 |
+
Returns:
|
668 |
+
state (dict): Updates documents key with appended web results
|
669 |
+
"""
|
670 |
+
|
671 |
+
# print("---WEB SEARCH---")
|
672 |
+
##Rephrase user question to lead to a better LLM response
|
673 |
+
question = user_query_rephrasing(state=state, config=config)['question']
|
674 |
+
|
675 |
+
##API Wrapper
|
676 |
+
search = BingSearchAPIWrapper()
|
677 |
+
|
678 |
+
##Bing Search Results
|
679 |
+
web_results = BingSearchResults(k=3, api_wrapper=search)
|
680 |
+
result = await web_results.ainvoke(
|
681 |
+
{"query": question},
|
682 |
+
)
|
683 |
+
fixed_string = result.replace("'", "\"")
|
684 |
+
result_json = json.loads(fixed_string)
|
685 |
+
|
686 |
+
# Create a temporary file
|
687 |
+
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
|
688 |
+
# Write the JSON data to the temporary file
|
689 |
+
json.dump(result_json, temp_file)
|
690 |
+
temp_file.flush()
|
691 |
+
|
692 |
+
# Load the JSON data from the temporary file
|
693 |
+
loader = JSONLoader(file_path=temp_file.name, jq_schema=".[]", text_content=False)
|
694 |
+
docs = loader.load()
|
695 |
+
|
696 |
+
return {"documents": format_docs(docs), "question": question, "web_search": "Yes", "generation": None}
|
697 |
+
|
698 |
+
### Edges ###
|
699 |
+
def route_to_node(state:APMState):
|
700 |
+
|
701 |
+
if state.source == "websearch":
|
702 |
+
#print("---ROUTE QUESTION TO WEB SEARCH---")
|
703 |
+
return "websearch"
|
704 |
+
elif state.source == "vectorstore":
|
705 |
+
#print("---ROUTE QUESTION TO RAG---")
|
706 |
+
return "vectorstore"
|
707 |
+
|
708 |
+
async def route_question(
|
709 |
+
state: APMState, config: RunnableConfig
|
710 |
+
) -> dict[str, any]:
|
711 |
+
"""
|
712 |
+
Route question to web search or RAG.
|
713 |
+
|
714 |
+
Args:
|
715 |
+
state (dict): The current graph state
|
716 |
+
|
717 |
+
Returns:
|
718 |
+
str: Next node to call
|
719 |
+
"""
|
720 |
+
|
721 |
+
#print("---ROUTE QUESTION---")
|
722 |
+
source = await apm_query_router(state, config)
|
723 |
+
|
724 |
+
return {"source":source}
|
725 |
+
|
726 |
+
async def stream_generation(
|
727 |
+
state: APMState, config: RunnableConfig
|
728 |
+
):
|
729 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
730 |
+
|
731 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url,streaming=configuration.streaming)
|
732 |
+
|
733 |
+
async for s in state:
|
734 |
+
documents = getattr(s,"documents")
|
735 |
+
web_search = getattr(s,"web_search")
|
736 |
+
question = getattr(s,"question")
|
737 |
+
chat_memory = getattr(s,"chat_memory")
|
738 |
+
|
739 |
+
# Prompt Web Search generation
|
740 |
+
if web_search == "Yes":
|
741 |
+
prompt = PromptTemplate(
|
742 |
+
template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an enterprise architect assistant for question-answering tasks.
|
743 |
+
Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know.
|
744 |
+
Keep the answer concise <|eot_id|><|start_header_id|>user<|end_header_id|>
|
745 |
+
Question: {user_question}
|
746 |
+
Context: {cdocs}
|
747 |
+
Answer: <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
|
748 |
+
input_variables=["user_question", "cdocs"],
|
749 |
+
)
|
750 |
+
else:
|
751 |
+
# Now we construct the inputs for the final prompt
|
752 |
+
# identify primary, second category
|
753 |
+
tc = identify_task_category(question,chat_memory,config)
|
754 |
+
prompt = e4p.ea4ll_prompt_selector(tc['primary'])
|
755 |
+
|
756 |
+
rag_chain = prompt | llm | StrOutputParser()
|
757 |
+
|
758 |
+
async for output in rag_chain.astream({"cdocs": documents, "user_question": question}):
|
759 |
+
yield(output)
|
760 |
+
|
761 |
+
async def generate(
|
762 |
+
state: APMState, config: RunnableConfig
|
763 |
+
) -> dict[str, any]:
|
764 |
+
"""
|
765 |
+
Generate answer
|
766 |
+
|
767 |
+
Args:
|
768 |
+
state (dict): The current graph state
|
769 |
+
config (RunnableConfig): Configuration with the model used for query analysis.
|
770 |
+
|
771 |
+
Returns:
|
772 |
+
state (dict): New key added to state, generation, that contains LLM generation
|
773 |
+
"""
|
774 |
+
#print("---GENERATE---")
|
775 |
+
|
776 |
+
documents = getattr(state,'documents')
|
777 |
+
web_search = getattr(state,'web_search')
|
778 |
+
question = getattr(state,'question')
|
779 |
+
|
780 |
+
##Triggered by hallucination_grade? 2025-02-21 - NOT USED, being edged to END atm
|
781 |
+
#2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
|
782 |
+
#if getattr(state,'generation') is None:
|
783 |
+
# if getattr(state,'web_search') == "Yes":
|
784 |
+
# await websearch(state, config)
|
785 |
+
# else:
|
786 |
+
# state.rag = "1"
|
787 |
+
# await retrieve(state, config)
|
788 |
+
|
789 |
+
# Generate answer
|
790 |
+
tags = ["websearch_stream"] if web_search == "Yes" else ["apm_stream"]
|
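# RunnableGenerator wraps the async generator stream_generation so it can be streamed like any
# other runnable, and the tags make its output filterable downstream.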
791 |
+
gen = RunnableGenerator(stream_generation).with_config(tags=tags)
|
792 |
+
generation=""
|
793 |
+
async for message in gen.astream(state):
|
794 |
+
generation = ''.join([generation,message])
|
795 |
+
|
796 |
+
return {"documents": documents, "question": question, "generation": generation, "web_search": web_search}
|
797 |
+
|
798 |
+
#ea4all-qna-agent-conversational-with-memory
|
799 |
+
async def apm_agentic_qna(
|
800 |
+
state:APMState, config: RunnableConfig):
|
801 |
+
|
802 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
803 |
+
|
804 |
+
question = getattr(state,'question')
|
805 |
+
chat_memory = getattr(state,'chat_memory')
|
806 |
+
|
807 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
808 |
+
|
809 |
+
retriever = await apm_retriever(config)
|
810 |
+
|
811 |
+
# First we add a step to load memory from gr.ChatInterface.history_chat
|
812 |
+
# This adds a "memory" key to the input object
|
813 |
+
loaded_memory = RunnablePassthrough.assign(
|
814 |
+
chat_history = itemgetter("chat_memory"))
|
815 |
+
|
816 |
+
# Now we calculate the standalone question <= Original Question + ChatHistory
|
817 |
+
standalone_question = {
|
818 |
+
"standalone_question": {
|
819 |
+
"chat_history": lambda x: str(x["chat_history"]),
|
820 |
+
"user_question": lambda x: x["user_question"]
|
821 |
+
}
|
822 |
+
| e4p.CONDENSE_QUESTION_PROMPT
|
823 |
+
| llm
|
824 |
+
| StrOutputParser()
|
825 |
+
}
|
826 |
+
|
827 |
+
# Start with Hyde
|
828 |
+
prompt_hyde = ChatPromptTemplate.from_template(e4p.hyde_template)
|
829 |
+
generate_docs_for_retrieval = (
|
830 |
+
prompt_hyde |
|
831 |
+
llm |
|
832 |
+
StrOutputParser()
|
833 |
+
)
|
834 |
+
retrieval_chain = generate_docs_for_retrieval | retriever
|
835 |
+
|
836 |
+
retrieved_documents = {
|
837 |
+
"cdocs": retrieval_chain,
|
838 |
+
"query": itemgetter("standalone_question")
|
839 |
+
}
|
840 |
+
|
841 |
+
# And now we put it all together!
|
842 |
+
final_chain = loaded_memory | standalone_question | retrieved_documents
|
843 |
+
|
844 |
+
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":chat_memory})
|
845 |
+
|
846 |
+
return {"documents": format_docs(documents['cdocs']), "question": question, "rag":5, "web_search": "No", "generation": None}
|
847 |
+
|
848 |
+
async def final(state: APMState):
|
849 |
+
return {"safety_status": state}
|
850 |
+
|
851 |
+
async def choose_next(state: APMState):
|
852 |
+
return "exit" if state.safety_status[0] == 'no' else "route"
|
853 |
+
|
854 |
+
class SafetyCheck:
|
855 |
+
def apm_safety_check(self,state: APMState, config: RunnableConfig):
|
856 |
+
|
857 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
858 |
+
question = state.question
|
859 |
+
|
860 |
+
safety_prompt = pull('learn-it-all-do-it-all/ea4all_apm_safety_check')
|
861 |
+
|
862 |
+
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
863 |
+
|
864 |
+
route = safety_prompt | llm | JsonOutputParser()
|
865 |
+
|
866 |
+
response = route.invoke({"user_question": question})
|
867 |
+
|
868 |
+
try:
|
869 |
+
score = response['score']
|
870 |
+
explain = response['response']
|
871 |
+
except (KeyError, TypeError):
|
872 |
+
score = 'no'
|
873 |
+
explain = 'I cannot answer your question at moment!'
|
874 |
+
|
875 |
+
return {"safety_status": [score, explain, question]}
|
876 |
+
|
877 |
+
def __init__(self):
|
878 |
+
self._safety_run = self.apm_safety_check
|
879 |
+
|
880 |
+
def __call__(self, state: APMState, config: RunnableConfig) -> dict[str, list]:
|
881 |
+
try:
|
882 |
+
response = getattr(self, '_safety_run')(state, config)
|
883 |
+
return {"safety_status": [response['safety_status'][0], "", state.question]}
|
884 |
+
except Exception as e:
|
885 |
+
return {"safety_status": ['no', e, state.question]}
|
886 |
+
|
887 |
+
##BUILD APM Graph
|
888 |
+
# Build graph
|
889 |
+
workflow = StateGraph(APMState, input=InputState, config_schema=AgentConfiguration)
|
890 |
+
|
891 |
+
# Define the nodes
|
892 |
+
workflow.add_node("safety_check",SafetyCheck())
|
893 |
+
workflow.add_node("route_question", route_question) # route to vectorstore or websearch
|
894 |
+
workflow.add_node("retrieve", apm_agentic_qna) # retrieve
|
895 |
+
workflow.add_node("websearch", websearch) # web search
|
896 |
+
workflow.add_node("generate", generate) # generate web search based answer
|
897 |
+
workflow.add_node("final", final)
|
898 |
+
|
899 |
+
workflow.set_entry_point("safety_check")
|
900 |
+
workflow.add_conditional_edges(
|
901 |
+
"safety_check",
|
902 |
+
choose_next,
|
903 |
+
{
|
904 |
+
"exit": "final",
|
905 |
+
"route": "route_question"
|
906 |
+
}
|
907 |
+
)
|
908 |
+
workflow.add_conditional_edges(
|
909 |
+
"route_question",
|
910 |
+
route_to_node,
|
911 |
+
{
|
912 |
+
"websearch": "websearch",
|
913 |
+
"vectorstore": "retrieve",
|
914 |
+
},
|
915 |
+
)
|
916 |
+
workflow.add_edge("retrieve", "generate")
|
917 |
+
workflow.add_edge("websearch", "generate")
|
918 |
+
workflow.add_conditional_edges( #2025-02-27: Conditional edges expect sync function only
|
919 |
+
"generate",
|
920 |
+
grade_generation_v_documents_and_question,
|
921 |
+
{
|
922 |
+
"not supported": "route_question",
|
923 |
+
"useful": END,
|
924 |
+
"not useful": END, ##2025-02-21: need to review THIS to try again and respond to user with a better answer
|
925 |
+
},
|
926 |
+
)
|
927 |
+
workflow.add_edge("final", END)
|
928 |
+
|
929 |
+
# Compile
|
930 |
+
apm_graph = workflow.compile()
|
931 |
+
apm_graph.name = "APMGraph"
|
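# Invocation sketch (illustrative only; assumes the input state accepts a 'question' field):
#   result = await apm_graph.ainvoke({"question": "Which applications support the finance landscape?"})
#   print(result["generation"])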
ea4all/src/ea4all_apm/prompts.py
ADDED
@@ -0,0 +1,292 @@
1 |
+
"""Default prompts and support functions."""
|
2 |
+
|
3 |
+
#prompt libraries
|
4 |
+
from langchain_core.prompts.chat import (ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate,)
|
5 |
+
from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
|
6 |
+
from langchain_core.prompts import format_document
|
7 |
+
from langchain_core.prompts import ChatPromptTemplate
|
8 |
+
from langchain.chains.prompt_selector import ConditionalPromptSelector
|
9 |
+
|
10 |
+
from langchain_core.output_parsers.json import JsonOutputParser
|
11 |
+
|
12 |
+
##return a prompt-template class with informed user inquiry
|
13 |
+
def ea4all_prompt(query):
|
14 |
+
prompt_template = PromptTemplate(
|
15 |
+
input_variables=["query", "answer"],
|
16 |
+
template=TEMPLATE_QUERY_ANSWER)
|
17 |
+
|
18 |
+
prompt = prompt_template.format(
|
19 |
+
user_question=query,
|
20 |
+
answer="")
|
21 |
+
|
22 |
+
return prompt
|
23 |
+
|
24 |
+
##return a chat-prompt-template class from the informed template
|
25 |
+
def ea4all_chat_prompt(template):
|
26 |
+
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
|
27 |
+
human_template = "{user_question}"
|
28 |
+
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
|
29 |
+
|
30 |
+
ea4all_prompt = ChatPromptTemplate.from_messages(
|
31 |
+
messages=[
|
32 |
+
system_message_prompt,
|
33 |
+
## MessagesPlaceholder(variable_name="history"),
|
34 |
+
human_message_prompt],
|
35 |
+
)
|
36 |
+
ea4all_prompt.output_parser=JsonOutputParser()
|
37 |
+
|
38 |
+
return ea4all_prompt
|
39 |
+
|
40 |
+
##select best prompt based on user inquiry's category
|
41 |
+
def ea4ll_prompt_selector(category):
|
42 |
+
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
|
43 |
+
default_prompt = ea4all_chat_prompt(GENERAL_TEMPLATE),
|
44 |
+
conditionals=[
|
45 |
+
(lambda category: True if category == "Strategy" else False, ea4all_chat_prompt(STRATEGY_TEMPLATE)),
|
46 |
+
(lambda category: True if category == "Application" else False, ea4all_chat_prompt(APPLICATION_TEMPLATE)),
|
47 |
+
(lambda category: True if category == "Recommendation" else False, ea4all_chat_prompt(RECOMMENDATION_TEMPLATE)),
|
48 |
+
(lambda category: True if category not in ("Strategy","Application", "Recommendation") else False, ea4all_chat_prompt(GENERAL_TEMPLATE))
|
49 |
+
]
|
50 |
+
)
|
51 |
+
|
52 |
+
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(category)
|
53 |
+
|
54 |
+
return(prompt)
|
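# Usage sketch (illustrative): ea4ll_prompt_selector("Strategy") returns the chat prompt built from
# STRATEGY_TEMPLATE; any category outside Strategy/Application/Recommendation falls back to
# GENERAL_TEMPLATE.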
55 |
+
|
56 |
+
|
57 |
+
##Template-basic instruction + context
|
58 |
+
TEMPLATE_CONTEXT = """You are a helpful Enterprise Architect with knowledge on enterprises IT landscapes.
|
59 |
+
Use only the context delimited by triple backticks to answer questions. Return the answer formatted as a text paragraph.
|
60 |
+
If you don't know the answer, respond with "I could not find the information".
|
61 |
+
Don't make up the response.
|
62 |
+
Context: ```{cdocs}```
|
63 |
+
Help answer: ""
|
64 |
+
"""
|
65 |
+
|
66 |
+
##Template-basic instruction + question + answer
|
67 |
+
TEMPLATE_QUERY_ANSWER = """You are an Enterprise Architect highly knowledgeable about IT landscapes. \
|
68 |
+
Answer the question delimited by triple backticks in the style of a bullet list. \
|
69 |
+
If the question cannot be answered using the information provided answer with "I don't know". \
|
70 |
+
|
71 |
+
Always say "thanks for asking!" at the end of the answer.
|
72 |
+
|
73 |
+
Question: ```{user_question}```
|
74 |
+
Answer: {answer}
|
75 |
+
"""
|
76 |
+
|
77 |
+
TEMPLATE_APM_QNA_ROUTING = """application portfolio assessment, application/IT landscape rationalisation, simplification or optimisation, business capability assessment, line of business landscape, who can I talk to, assistance from architecture team."""
|
78 |
+
|
79 |
+
##Template-break-into-simpler-tasks
|
80 |
+
#https://platform.openai.com/docs/guides/prompt-engineering/strategy-split-complex-tasks-into-simpler-subtasks
|
81 |
+
TEMPLATE_HEADER = """You are a helpful enterprise architect assistant. """
|
82 |
+
TEMPLATE_HEADER += """Your goal is to provide accurate and detailed responses to user inquiry. """
|
83 |
+
TEMPLATE_HEADER += """You have access to a vast amount of enterprise architecture knowledge, """
|
84 |
+
TEMPLATE_HEADER += """and you can understand and generate language fluently. """
|
85 |
+
TEMPLATE_HEADER += """You can assist with a wide range of architectural topics, including but not limited to """
|
86 |
+
TEMPLATE_HEADER += """business, application, data and technology architectures. """
|
87 |
+
TEMPLATE_HEADER += """You should always strive to promote a positive and respectful conversation.
|
88 |
+
"""
|
89 |
+
|
90 |
+
TEMPLATE_TASKS = ""
|
91 |
+
TEMPLATE_TASKS += """You will be provided with a user inquiry. """
|
92 |
+
TEMPLATE_TASKS += """Classify the inquiry into primary category and secondary category. """
|
93 |
+
TEMPLATE_TASKS += """Primary categories: Strategy, Application, Recommendation or General Inquiry. """
|
94 |
+
TEMPLATE_TASKS += """Strategy secondary categories:
|
95 |
+
- Architecture and Technology Strategy
|
96 |
+
- Vision
|
97 |
+
- Architecture Principles
|
98 |
+
"""
|
99 |
+
TEMPLATE_TASKS += """Application secondary categories:
|
100 |
+
- Meet business and technical need
|
101 |
+
- Business criticality
|
102 |
+
- Roadmap
|
103 |
+
- Business Capability
|
104 |
+
- Hosting
|
105 |
+
"""
|
106 |
+
TEMPLATE_TASKS += """Recommendation secondary categories:
|
107 |
+
- Application rationalisation
|
108 |
+
- Landscape simplification
|
109 |
+
- Reuse existent invested application
|
110 |
+
- Business capability with overlapping applications
|
111 |
+
- Opportunities and innovation
|
112 |
+
"""
|
113 |
+
TEMPLATE_TASKS += """General inquiry:
|
114 |
+
- Speak to an architect
|
115 |
+
"""
|
116 |
+
TEMPLATE_TASKS += """You may also revise the original inquiry if you think that revising \
|
117 |
+
it will ultimately lead to a better response from the language model. """
|
118 |
+
TEMPLATE_TASKS += """Provide your output in JSON format with the keys: primary, secondary, question.
|
119 |
+
"""
|
120 |
+
|
121 |
+
#Template-break-into-specific-prompt-by-category
|
122 |
+
strategy_template = """You will be provided with inquiry about architecture strategy.
|
123 |
+
Follow these steps to answer user inquiry:
|
124 |
+
STEP 1 - Use only the context delimited by triple backticks.
|
125 |
+
STEP 2 - Look at applications with a roadmap to invest.
|
126 |
+
STEP 3 - Extract only the information that is relevant to answering the user inquiry
|
127 |
+
"""
|
128 |
+
|
129 |
+
application_template = """You will be provided with an inquiry about application architecture.
|
130 |
+
Follow these steps to answer user inquiry:
|
131 |
+
STEP 1 - Use only the context delimited by triple backticks.
|
132 |
+
STEP 2 - Extract only the information that is relevant to answering the user inquiry
|
133 |
+
"""
|
134 |
+
|
135 |
+
recommendation_template = """You will be provided with enterprise architecture inquiry that needs a recommendation.
|
136 |
+
Follow these steps to answer user inquiry:
|
137 |
+
STEP 1 - Use only the context delimited by triple backticks.
|
138 |
+
STEP 2 - Look at applications with low business or technical fit
|
139 |
+
STEP 3 - Look at applications with a roadmap different from invest
|
140 |
+
STEP 4 - Look at applications hosted on premises
|
141 |
+
STEP 5 - Look at business capabilities with overlapping applications
|
142 |
+
"""
|
143 |
+
|
144 |
+
general_template = """You will provided with a general inquiry about enterprise architecture IT landscape.
|
145 |
+
Follow these steps to answer user queries:
|
146 |
+
STEP 1 - Use only the context delimited by triple backticks
|
147 |
+
STEP 2 - Extract only the information that is relevant to answering the user inquiry
|
148 |
+
"""
|
149 |
+
|
150 |
+
default_template = """
|
151 |
+
FINAL STEP - Do not make up or guess ANY extra information. \
|
152 |
+
Ask follow-up question to the user if you need further clarification to understand and answer their inquiry. \
|
153 |
+
After a follow-up question if you still don't know the answer or don't find specific information needed to answer the user inquiry \
|
154 |
+
return I could not find the information. \
|
155 |
+
Ensure that the response contains all relevant context needed to interpret it -
|
156 |
+
in other words don't extract small snippets that are missing important context.
|
157 |
+
Format the output as a string in the most appropriate style to make it clear, concise and user-friendly for a chatbot response.
|
158 |
+
Here is the question: {user_question}
|
159 |
+
Here is the context: ```{cdocs}```
|
160 |
+
"""
|
161 |
+
|
162 |
+
STRATEGY_TEMPLATE = TEMPLATE_HEADER + strategy_template + default_template
|
163 |
+
APPLICATION_TEMPLATE = TEMPLATE_HEADER + application_template + default_template
|
164 |
+
RECOMMENDATION_TEMPLATE = TEMPLATE_HEADER + recommendation_template + default_template
|
165 |
+
GENERAL_TEMPLATE = TEMPLATE_HEADER + general_template + default_template
|
166 |
+
|
167 |
+
|
168 |
+
###############################################
|
169 |
+
##COLLECTION of prompts for conversation memory
|
170 |
+
###############################################
|
171 |
+
|
172 |
+
_template = """Given the following conversation and a follow up question,\
|
173 |
+
rephrase the follow up question to be a standalone question, in its original language.\
|
174 |
+
Chat History:
|
175 |
+
{chat_history}
|
176 |
+
Follow Up Input: {user_question}
|
177 |
+
Standalone question:"""
|
178 |
+
|
179 |
+
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
|
180 |
+
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
|
181 |
+
|
182 |
+
|
183 |
+
def _combine_documents(
|
184 |
+
docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
|
185 |
+
):
|
186 |
+
doc_strings = [format_document(doc, document_prompt) for doc in docs]
|
187 |
+
|
188 |
+
return document_separator.join(doc_strings)
|
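A short sketch of how the condense-question prompt and the document combiner above are typically chained (the chat model choice and the sample documents are assumptions for illustration only):

    from langchain_openai import ChatOpenAI
    from langchain_core.documents import Document
    from langchain_core.output_parsers import StrOutputParser

    llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice
    condense_chain = CONDENSE_QUESTION_PROMPT | llm | StrOutputParser()
    standalone = condense_chain.invoke({
        "chat_history": "Human: What is APM?\nAI: Application Portfolio Management ...",
        "user_question": "And how does it help with rationalisation?",
    })
    # In the real graph the documents come from the retriever; these are stand-ins
    docs = [Document(page_content="App A supports Payments"), Document(page_content="App B is hosted on premises")]
    context = _combine_documents(docs)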
189 |
+
|
190 |
+
|
191 |
+
##################################################
|
192 |
+
##COLLECTION of prompts - RAG query transformation
|
193 |
+
##################################################
|
194 |
+
## Multi Query
|
195 |
+
# Prompt
|
196 |
+
multiquery_template = """You are an AI Enterprise Architect language model assistant. Your task is to generate five
|
197 |
+
different versions of the given user question to retrieve relevant documents from a vector
|
198 |
+
database. By generating multiple perspectives on the user question, your goal is to help
|
199 |
+
the user overcome some of the limitations of the distance-based similarity search.
|
200 |
+
Provide these alternative questions separated by newlines. Original question: {standalone_question}"""
|
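One possible wiring for the multi-query transformation above (a sketch only; the model choice and the question are illustrative):

    from langchain_openai import ChatOpenAI
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser

    llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice
    multiquery_prompt = ChatPromptTemplate.from_template(multiquery_template)
    generate_queries = (
        multiquery_prompt
        | llm
        | StrOutputParser()
        | (lambda text: [line for line in text.split("\n") if line.strip()])  # one query per line
    )
    alternatives = generate_queries.invoke(
        {"standalone_question": "How can we rationalise overlapping CRM applications?"}
    )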
201 |
+
|
202 |
+
decomposition_template = """You are a helpful enterprise architect assistant that generates multiple sub-questions related to an input question. \n
|
203 |
+
The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
|
204 |
+
Generate multiple search queries related to: {user_question} \n
|
205 |
+
Output (3 queries):"""
|
206 |
+
|
207 |
+
decomposition_answer_recursevely_template = """
|
208 |
+
Here is the question you need to answer:
|
209 |
+
|
210 |
+
\n --- \n {question} \n --- \n
|
211 |
+
|
212 |
+
Here is any available background question + answer pairs:
|
213 |
+
|
214 |
+
\n --- \n {q_a_pairs} \n --- \n
|
215 |
+
|
216 |
+
Here is additional context relevant to the question:
|
217 |
+
|
218 |
+
\n --- \n {context} \n --- \n
|
219 |
+
|
220 |
+
Use the above context and any background question + answer pairs to answer the question: \n {user_question}
|
221 |
+
"""
|
222 |
+
|
223 |
+
rag_fusion_questions_template = """You are a helpful enterprise architect assistant that generates multiple search queries based on a single input query. \n
|
224 |
+
Generate multiple search queries related to: {standalone_question} \n
|
225 |
+
Output (4 queries):"""
|
226 |
+
|
227 |
+
# Few Shot Examples
|
228 |
+
few_shot_step_back_examples = [
|
229 |
+
{
|
230 |
+
"input": "Could the members of The Police perform lawful arrests?",
|
231 |
+
"output": "what can the members of The Police do?",
|
232 |
+
},
|
233 |
+
{
|
234 |
+
"input": "Jan Sindel was born in what country?",
|
235 |
+
"output": "what is Jan Sindel personal history?",
|
236 |
+
},
|
237 |
+
]
|
238 |
+
# We now transform these to example messages
|
239 |
+
few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
|
240 |
+
[
|
241 |
+
("human", "{input}"),
|
242 |
+
("ai", "{output}"),
|
243 |
+
]
|
244 |
+
)
|
245 |
+
few_shot_prompt = FewShotChatMessagePromptTemplate(
|
246 |
+
input_variables=["standalone_question"],
|
247 |
+
example_prompt=few_shot_step_back_examples_prompt,
|
248 |
+
examples=few_shot_step_back_examples,
|
249 |
+
)
|
250 |
+
few_shot_step_back_prompt = ChatPromptTemplate.from_messages(
|
251 |
+
[
|
252 |
+
(
|
253 |
+
"system",
|
254 |
+
"""You are an expert at enterprise architecture world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
|
255 |
+
),
|
256 |
+
# Few shot examples
|
257 |
+
few_shot_prompt,
|
258 |
+
# New question
|
259 |
+
("user", "{standalone_question}"),
|
260 |
+
]
|
261 |
+
)
|
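A small sketch of the step-back prompt in use (model choice is illustrative):

    from langchain_openai import ChatOpenAI
    from langchain_core.output_parsers import StrOutputParser

    llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice
    step_back_chain = few_shot_step_back_prompt | llm | StrOutputParser()
    generic_question = step_back_chain.invoke(
        {"standalone_question": "Which ERP modules does the finance team use for reconciliations?"}
    )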
262 |
+
# Response prompt
|
263 |
+
step_back_response_prompt_template = """You are an expert in enterprise architecture world knowledge. I am going to ask you a question. Your response should be comprehensive and should not contradict the following context if it is relevant. Otherwise, ignore it if it is not relevant.
|
264 |
+
|
265 |
+
# {normal_context}
|
266 |
+
# {step_back_context}
|
267 |
+
|
268 |
+
# Original Question: {standalone_question}
|
269 |
+
"""
|
270 |
+
|
271 |
+
# HyDE document generation
|
272 |
+
hyde_template = """Please write an architecture scientific passage to answer the question
|
273 |
+
Question: {standalone_question}
|
274 |
+
Passage:"""
|
275 |
+
|
276 |
+
# Retrieval APM Graph - TO BE REVIEWED
|
277 |
+
ROUTER_SYSTEM_PROMPT = """You are a LangChain Developer Advocate. Your job is to help people using LangChain resolve any issues they are running into.
|
278 |
+
|
279 |
+
A user will come to you with an inquiry. Your first job is to classify what type of inquiry it is. The types of inquiries you should classify it as are:
|
280 |
+
|
281 |
+
## `more-info`
|
282 |
+
Classify a user inquiry as this if you need more information before you will be able to help them. Examples include:
|
283 |
+
- The user complains about an error but doesn't provide the error
|
284 |
+
- The user says something isn't working but doesn't explain why/how it's not working
|
285 |
+
|
286 |
+
## `langchain`
|
287 |
+
Classify a user inquiry as this if it can be answered by looking up information related to LangChain open source package. The LangChain open source package \
|
288 |
+
is a python library for working with LLMs. It integrates with various LLMs, databases and APIs.
|
289 |
+
|
290 |
+
## `general`
|
291 |
+
Classify a user inquiry as this if it is just a general question"""
|
292 |
+
|
ea4all/src/ea4all_apm/state.py
ADDED
@@ -0,0 +1,87 @@
1 |
+
"""State management for the APM graph.
|
2 |
+
|
3 |
+
This module defines the state structures used in the APM graph. It includes
|
4 |
+
definitions for agent state, input state, and router classification schema.
|
5 |
+
"""
|
6 |
+
|
7 |
+
from dataclasses import dataclass, field
|
8 |
+
from typing import Optional, Literal, List, Tuple
|
9 |
+
from typing_extensions import TypedDict
|
10 |
+
|
11 |
+
# Optional, the InputState is a restricted version of the State that is used to
|
12 |
+
# define a narrower interface to the outside world vs. what is maintained
|
13 |
+
# internally.
|
14 |
+
@dataclass(kw_only=True)
|
15 |
+
class InputState:
|
16 |
+
"""Represents the input state for the agent.
|
17 |
+
|
18 |
+
This class defines the structure of the input state, which includes
|
19 |
+
the messages exchanged between the user and the agent. It serves as
|
20 |
+
a restricted version of the full State, providing a narrower interface
|
21 |
+
to the outside world compared to what is maintained internally.
|
22 |
+
"""
|
23 |
+
|
24 |
+
"""Attributes:
|
25 |
+
question: user question
|
26 |
+
"""
|
27 |
+
question: str
|
28 |
+
safety_status: Optional[Tuple[str, str, str]] = None
|
29 |
+
|
30 |
+
"""Messages track the primary execution state of the agent.
|
31 |
+
|
32 |
+
Typically accumulates a pattern of Human/AI/Human/AI messages; if
|
33 |
+
you were to combine this template with a tool-calling ReAct agent pattern,
|
34 |
+
it may look like this:
|
35 |
+
|
36 |
+
1. HumanMessage - user input
|
37 |
+
2. AIMessage with .tool_calls - agent picking tool(s) to use to collect
|
38 |
+
information
|
39 |
+
3. ToolMessage(s) - the responses (or errors) from the executed tools
|
40 |
+
|
41 |
+
(... repeat steps 2 and 3 as needed ...)
|
42 |
+
4. AIMessage without .tool_calls - agent responding in unstructured
|
43 |
+
format to the user.
|
44 |
+
|
45 |
+
5. HumanMessage - user responds with the next conversational turn.
|
46 |
+
|
47 |
+
(... repeat steps 2-5 as needed ... )
|
48 |
+
|
49 |
+
Merges two lists of messages, updating existing messages by ID.
|
50 |
+
|
51 |
+
By default, this ensures the state is "append-only", unless the
|
52 |
+
new message has the same ID as an existing message.
|
53 |
+
|
54 |
+
Returns:
|
55 |
+
A new list of messages with the messages from `right` merged into `left`.
|
56 |
+
If a message in `right` has the same ID as a message in `left`, the
|
57 |
+
message from `right` will replace the message from `left`."""
|
58 |
+
|
59 |
+
|
60 |
+
class Router(TypedDict):
|
61 |
+
"""Classify a user query."""
|
62 |
+
logic: str
|
63 |
+
    datasource: Optional[Literal["vectorstore", "websearch"]]  # TypedDict fields should not declare default values
|
64 |
+
|
65 |
+
@dataclass(kw_only=True)
|
66 |
+
class APMState(InputState):
|
67 |
+
"""State of the APM graph / agent."""
|
68 |
+
|
69 |
+
"""
|
70 |
+
safety_status: user question's safeguarding status, justification, rephrased question
|
71 |
+
router: classification of the user's query
|
72 |
+
source: RAG or websearch
|
73 |
+
web_search: whether to add search
|
74 |
+
retrieved: list of documents retrieved by the retriever
|
75 |
+
rag: last RAG approach used
|
76 |
+
chat_memory: user chat memory
|
77 |
+
generation: should the agent generate a response
|
78 |
+
documents: list of documents retrieved by the retriever
|
79 |
+
"""
|
80 |
+
router: Optional[Router] = None
|
81 |
+
source: Optional[str] = None
|
82 |
+
rag: Optional[str] = None
|
83 |
+
web_search: Optional[str] = None
|
84 |
+
chat_memory: Optional[str] = None
|
85 |
+
generation: Optional[str] = None
|
86 |
+
retrieved: Optional[List[str]] = None
|
87 |
+
documents: Optional[List[str]] = None
|
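As a rough illustration of how these state classes compose (the values are placeholders, not taken from the graph code):

    # Promote a plain user question into the full agent state
    initial = InputState(question="Which applications can be retired this year?")
    state = APMState(
        question=initial.question,
        router=Router(logic="application portfolio question", datasource="vectorstore"),
        source="RAG",
    )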
ea4all/src/ea4all_gra/configuration.py
ADDED
@@ -0,0 +1,46 @@
1 |
+
"""Define the configurable parameters for the TOGAF agent."""
|
2 |
+
|
3 |
+
from __future__ import annotations
|
4 |
+
|
5 |
+
from dataclasses import dataclass, field
|
6 |
+
from typing import Annotated
|
7 |
+
|
8 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
9 |
+
|
10 |
+
@dataclass(kw_only=True)
|
11 |
+
class AgentConfiguration(BaseConfiguration):
|
12 |
+
"""The configuration for the agent."""
|
13 |
+
|
14 |
+
supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
15 |
+
default="gpt-4o-mini",
|
16 |
+
metadata={
|
17 |
+
"description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
|
18 |
+
},
|
19 |
+
)
|
20 |
+
togaf_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
21 |
+
default="meta-llama/Llama-3.3-70B-Instruct",
|
22 |
+
metadata={
|
23 |
+
"description": "The language model used for processing and refining queries. Should be in the form: provider/model-name."
|
24 |
+
},
|
25 |
+
)
|
26 |
+
|
27 |
+
recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "integer"}}] = field(
|
28 |
+
default=10,
|
29 |
+
metadata={
|
30 |
+
"description": "The maximum number of times the agent can recursively call itself."
|
31 |
+
},
|
32 |
+
)
|
33 |
+
|
34 |
+
dbr_mock: Annotated[str, {"__template_metadata__": {"kind": "dataset"}}] = field(
|
35 |
+
default="dbr.txt",
|
36 |
+
metadata={
|
37 |
+
"description": "The EA4ALL Togal Business Requirement mock content."
|
38 |
+
},
|
39 |
+
)
|
40 |
+
|
41 |
+
ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
|
42 |
+
default="Frontend",
|
43 |
+
metadata={
|
44 |
+
"description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
|
45 |
+
},
|
46 |
+
)
|
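A sketch of how these fields are resolved at run time (assuming from_runnable_config is inherited from BaseConfiguration in ea4all/src/shared/configuration.py, which is not shown here):

    from langchain_core.runnables import RunnableConfig

    config: RunnableConfig = {"configurable": {"togaf_model": "meta-llama/Llama-3.3-70B-Instruct"}}
    agent_cfg = AgentConfiguration.from_runnable_config(config)
    print(agent_cfg.supervisor_model, agent_cfg.recursion_limit)  # unspecified fields fall back to the defaults above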
ea4all/src/ea4all_gra/data.py
ADDED
@@ -0,0 +1,131 @@
1 |
+
from typing import List, Optional
|
2 |
+
from pydantic import BaseModel, Field
|
3 |
+
|
4 |
+
"""
|
5 |
+
This module defines the data models used in the EA4ALL TOGAF project.
|
6 |
+
The data models include:
|
7 |
+
- Capability: Represents a business capability.
|
8 |
+
- BusinessCapability: Represents a list of required business capabilities.
|
9 |
+
- Requirement: Represents a business requirement.
|
10 |
+
- ListRequirement: Represents a list of identified business requirements.
|
11 |
+
- Objective: Represents a business objective.
|
12 |
+
- ListObjective: Represents a list of business objectives.
|
13 |
+
- UseCase: Represents a use case describing user interactions with the system.
|
14 |
+
- UserJourney: Represents a list of user journeys.
|
15 |
+
- StakeholderMap: Represents a business stakeholder.
|
16 |
+
- StakeholderList: Represents a list of business stakeholders.
|
17 |
+
- IdentifiedApp: Represents an identified application.
|
18 |
+
- LandscapeAsIs: Represents a list of applications to address a business query.
|
19 |
+
- CapabilityAsIs: Represents the support status of a business capability.
|
20 |
+
- CapabilityGap: Represents a list of capability support statuses.
|
21 |
+
- GradeAnswer: Represents a binary score for relevance check on retrieved applications.
|
22 |
+
- GradeHallucinations: Represents a binary score for hallucination present in generation answer.
|
23 |
+
- GradeDocuments: Represents a binary score for relevance check on retrieved applications.
|
24 |
+
- Principles: Represents the business, architecture, and technology principles.
|
25 |
+
- GradeBusinessQueryAnswer: Represents a binary score for quality check on business query.
|
26 |
+
"""
|
27 |
+
|
28 |
+
|
29 |
+
class Capability(BaseModel):
|
30 |
+
"""Business capability"""
|
31 |
+
capability: str = Field(description="Business capability name.")
|
32 |
+
|
33 |
+
class BusinessCapability(BaseModel):
|
34 |
+
"""List of required business capabilities."""
|
35 |
+
capabilities: Optional[List[Capability]]
|
36 |
+
|
37 |
+
class Requirement(BaseModel):
|
38 |
+
"""Business requirement."""
|
39 |
+
category: str = Field(description="Business requirement should be functional or non-functional")
|
40 |
+
requirement: str = Field(description="Business requirement description.")
|
41 |
+
|
42 |
+
class ListRequirement(BaseModel):
|
43 |
+
"""List of identified business requirements."""
|
44 |
+
requirements: Optional[List[Requirement]]
|
45 |
+
|
46 |
+
class Objective(BaseModel):
|
47 |
+
"""Business Objective"""
|
48 |
+
objective: str = Field(title=None, description="Business objective.")
|
49 |
+
|
50 |
+
class ListObjective(BaseModel):
|
51 |
+
"""List of business objectives."""
|
52 |
+
objectives: Optional[List[Objective]]
|
53 |
+
|
54 |
+
class UseCase(BaseModel):
|
55 |
+
"""Use case describing who (actor,user,persona) does what (interaction) with the system, for what purpose (goal), without dealing with system internals."""
|
56 |
+
    persona: str = Field(description="User, actor or persona who interacts with the system.")
|
57 |
+
step: str = Field(description="Action executed by user.")
|
58 |
+
goal: str = Field(description="Purpose, goal of a step executed by user.")
|
59 |
+
|
60 |
+
class UserJourney(BaseModel):
|
61 |
+
"""List of user journey."""
|
62 |
+
userjourney: Optional[List[UseCase]]
|
63 |
+
|
64 |
+
class StakeholderMap(BaseModel):
|
65 |
+
"""Business stakeholder."""
|
66 |
+
stakeholder: str = Field(description="Stakeholder name.")
|
67 |
+
role: str = Field(description="Stakeholder role.")
|
68 |
+
concern: str = Field(description="Stakeholder concern.")
|
69 |
+
|
70 |
+
class StakeholderList(BaseModel):
|
71 |
+
"""List of business stakeholders."""
|
72 |
+
stakeholders: Optional[List[StakeholderMap]]
|
73 |
+
|
74 |
+
#Task-2
|
75 |
+
class IdentifiedApp(BaseModel):
|
76 |
+
"""Identified application"""
|
77 |
+
application: str = Field(description="Application name")
|
78 |
+
description: str = Field(description="Application description")
|
79 |
+
capability: list = Field(description="Business capabilities supported")
|
80 |
+
    businessFit: str = Field(description="How well the application supports the current business need")
|
81 |
+
technicalFit: str = Field(description="application alignment with technology strategy")
|
82 |
+
roadmap: str = Field(description="application portfolio strategy")
|
83 |
+
|
84 |
+
class LandscapeAsIs(BaseModel):
|
85 |
+
"""List of applications to address a business query."""
|
86 |
+
identified_asis: Optional[List[IdentifiedApp]]
|
87 |
+
|
88 |
+
class CapabilityAsIs(BaseModel):
|
89 |
+
"""Business capability support"""
|
90 |
+
capability: str = Field(description="business capability definition")
|
91 |
+
support: bool = Field(description="capability support status")
|
92 |
+
|
93 |
+
class CapabilityGap(BaseModel):
|
94 |
+
"""List of capabilities support status"""
|
95 |
+
capability_status: Optional[List[CapabilityAsIs]]
|
96 |
+
|
97 |
+
class GradeAnswer(BaseModel):
|
98 |
+
"""Binary score for relevance check on retrieved applications."""
|
99 |
+
|
100 |
+
binary_score: str = Field(...,
|
101 |
+
description="Relevance of retrieved applications to the business query, 'yes' or 'no'"
|
102 |
+
)
|
103 |
+
|
104 |
+
class GradeHallucinations(BaseModel):
|
105 |
+
"""Binary score for hallucination present in generation answer."""
|
106 |
+
|
107 |
+
binary_score: bool = Field(
|
108 |
+
description="Answer is grounded in the facts, 'yes' or 'no'"
|
109 |
+
)
|
110 |
+
|
111 |
+
class GradeDocuments(BaseModel):
|
112 |
+
"""Binary score for relevance check on retrieved applications."""
|
113 |
+
|
114 |
+
binary_score: str = Field(
|
115 |
+
description="Applications support the business capability, 'yes' or 'no'"
|
116 |
+
)
|
117 |
+
|
118 |
+
#Task-3
|
119 |
+
class Principles(BaseModel):
|
120 |
+
"""Describe the business, archirecture and technology principles"""
|
121 |
+
architecture: list = Field(description="Name and description of an architecture principle")
|
122 |
+
business: list = Field(description="Name and description of a business principle")
|
123 |
+
technology: list = Field(description="Name and description of a technology principle")
|
124 |
+
|
125 |
+
#Togaf-Agentic-Workflow
|
126 |
+
class GradeBusinessQueryAnswer(BaseModel):
|
127 |
+
"""Binary score for quality check on business query."""
|
128 |
+
|
129 |
+
binary_score: str = Field(
|
130 |
+
description="Business Query is well-described, 'yes' or 'no'"
|
131 |
+
)
|
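These Pydantic models are the shapes the TOGAF graphs expect back from the LLM. A hedged sketch of binding one of them as structured output (the model handle is illustrative, not part of this commit):

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice
    grader = llm.with_structured_output(GradeBusinessQueryAnswer)
    verdict = grader.invoke(
        "Business query: 'We need a new CRM to support customer onboarding.' Is it well described?"
    )
    print(verdict.binary_score)  # 'yes' or 'no'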
ea4all/src/ea4all_gra/graph.py
ADDED
@@ -0,0 +1,409 @@
1 |
+
"""
|
2 |
+
This module contains the implementation of a Togaf reference architecture graph.
|
3 |
+
The graph represents a workflow for managing a conversation between team members
|
4 |
+
in the context of Togaf, a framework for enterprise architecture development.
|
5 |
+
|
6 |
+
The graph is defined using the StateGraph class from the langgraph library.
|
7 |
+
It consists of several nodes, each representing a specific task or action in the workflow.
|
8 |
+
The nodes are connected by edges, which control the flow of logic through the program.
|
9 |
+
|
10 |
+
The main entry point of the graph is the "ask_human" node, which prompts the user to provide
|
11 |
+
a business requirement document/file name. The input is then passed to the "enter_graph" node,
|
12 |
+
which initializes the state of the graph with the provided input.
|
13 |
+
|
14 |
+
The graph then proceeds to the "query_grader" node, which evaluates the quality of the business query.
|
15 |
+
Based on the evaluation, the graph branches to different nodes, such as "assess_query", "assess_asis",
|
16 |
+
and "generate_tobe", each representing a different task in the Togaf workflow.
|
17 |
+
|
18 |
+
The "togaf_supervisor" node acts as a router, determining the next role to act based on the conversation
|
19 |
+
and instructions. It uses an LLM (Large Language Model) to make the decision.
|
20 |
+
|
21 |
+
The graph continues to execute the tasks until it reaches the "return" node, which generates a response
|
22 |
+
to be returned to the user.
|
23 |
+
|
24 |
+
The graph is compiled and saved as a Togaf_reference_architecture_graph object, which can be executed
|
25 |
+
to run the workflow.
|
26 |
+
|
27 |
+
The module also includes helper functions and utility classes used by the graph, as well as import statements
|
28 |
+
for required libraries and modules.
|
29 |
+
"""
|
30 |
+
|
31 |
+
#core libraries
|
32 |
+
from langchain_core.runnables import RunnableConfig
|
33 |
+
from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
|
34 |
+
from langchain_core.prompts import ChatPromptTemplate
|
35 |
+
from langchain_core.runnables import RunnableConfig
|
36 |
+
from langchain_core.runnables import RunnableLambda
|
37 |
+
from langchain_core.messages import (
|
38 |
+
AIMessage,
|
39 |
+
SystemMessage,
|
40 |
+
HumanMessage,
|
41 |
+
)
|
42 |
+
from langchain_core.output_parsers import (
|
43 |
+
PydanticOutputParser,
|
44 |
+
JsonOutputKeyToolsParser
|
45 |
+
)
|
46 |
+
from langgraph.graph import (
|
47 |
+
END,
|
48 |
+
StateGraph,
|
49 |
+
)
|
50 |
+
from langgraph.types import Command, interrupt
|
51 |
+
from langgraph.checkpoint.memory import MemorySaver
|
52 |
+
|
53 |
+
from langchain import hub
|
54 |
+
|
55 |
+
import functools
|
56 |
+
|
57 |
+
from typing import List, Union, Dict
|
58 |
+
from typing_extensions import Literal
|
59 |
+
|
60 |
+
from ea4all.src.ea4all_gra.configuration import AgentConfiguration
|
61 |
+
from ea4all.src.ea4all_gra.state import TogafState
|
62 |
+
from ea4all.src.ea4all_gra.data import (
|
63 |
+
GradeBusinessQueryAnswer
|
64 |
+
)
|
65 |
+
|
66 |
+
from ea4all.src.shared.utils import (
|
67 |
+
get_llm_client,
|
68 |
+
clean_and_load_json,
|
69 |
+
extract_response_from_backticks,
|
70 |
+
load_mock_content,
|
71 |
+
)
|
72 |
+
from ea4all.src.shared.prompts import LLAMA31_PROMPT_FORMAT
|
73 |
+
|
74 |
+
from ea4all.src.ea4all_gra.togaf_task1.graph import task1_graph
|
75 |
+
from ea4all.src.ea4all_gra.togaf_task2.graph import task2_graph
|
76 |
+
from ea4all.src.ea4all_gra.togaf_task3.graph import task3_graph
|
77 |
+
|
78 |
+
from ea4all.src.ea4all_gra.utils import (
|
79 |
+
AsyncInterruptHandler
|
80 |
+
)
|
81 |
+
|
82 |
+
#CAPTURE business requirement by asking for USER input & call togaf_agentic workflow
|
83 |
+
async def _get_user_input():
|
84 |
+
|
85 |
+
interrupt_handler = AsyncInterruptHandler()
|
86 |
+
result = await interrupt_handler.handle_interrupt()
|
87 |
+
|
88 |
+
return {"user_feedback": result}
|
89 |
+
|
90 |
+
async def togaf_ask_human(state: TogafState, config: RunnableConfig) -> dict:
|
91 |
+
# Check user_input method
|
92 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
93 |
+
|
94 |
+
if "interrupt" in (AgentConfiguration.ea4all_ask_human, configuration.ea4all_ask_human):
|
95 |
+
print("--- TOGAF Blueprint Team --- User input requested")
|
96 |
+
response = interrupt(
|
97 |
+
{
|
98 |
+
"task": state['messages'][-1].content,
|
99 |
+
"content": "Please provide your business requirement in the form of document/file name or past the content:",
|
100 |
+
"optional": False
|
101 |
+
},
|
102 |
+
)
|
103 |
+
|
104 |
+
print(f"--- TOGAF AGENTIC team --- got an answer and processing user input: {response}")
|
105 |
+
|
106 |
+
business_query = load_mock_content(response['user_feedback'])
|
107 |
+
else:
|
108 |
+
business_query = state.get('business_query')
|
109 |
+
|
110 |
+
return Command(update={
|
111 |
+
#"messages": [
|
112 |
+
# HumanMessage(
|
113 |
+
# content=user_response, name="togaf_ask_human"
|
114 |
+
# )
|
115 |
+
#],
|
116 |
+
"business_query": business_query,
|
117 |
+
}
|
118 |
+
)
|
119 |
+
|
120 |
+
#DEFINE Helper functions
|
121 |
+
def create_team_supervisor(
|
122 |
+
state:Union[TogafState, Dict],
|
123 |
+
config:RunnableConfig) -> RunnableLambda:
|
124 |
+
members = ["AssessBusinessQuery", "AssessLandscape", "GenerateToBe"] #NEEDS REFACTORING
|
125 |
+
|
126 |
+
"""An LLM-based router."""
|
127 |
+
options = ["FINISH"] + members
|
128 |
+
function_def = {
|
129 |
+
"name": "route",
|
130 |
+
"description": "Select the next role.",
|
131 |
+
"parameters": {
|
132 |
+
"title": "routeSchema",
|
133 |
+
"type": "object",
|
134 |
+
"properties": {
|
135 |
+
"next": {
|
136 |
+
"type": "string",
|
137 |
+
"title": "Next",
|
138 |
+
"anyOf": [
|
139 |
+
{"enum": options},
|
140 |
+
],
|
141 |
+
},
|
142 |
+
},
|
143 |
+
"required": ["next"],
|
144 |
+
},
|
145 |
+
}
|
146 |
+
|
147 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
148 |
+
model = get_llm_client(
|
149 |
+
configuration.supervisor_model,
|
150 |
+
api_base_url="",
|
151 |
+
)
|
152 |
+
|
153 |
+
system_prompt = " ".join([
|
154 |
+
'You are a supervisor tasked with managing a conversation between the',
|
155 |
+
'following team members: {team_members}. Respond with the worker to act next in sequence.',
|
156 |
+
'Each worker will perform a task and respond with their results and status.',
|
157 |
+
        'After the last worker is finished, respond with FINISH.']
|
158 |
+
)
|
159 |
+
|
160 |
+
prompt = ChatPromptTemplate.from_messages(
|
161 |
+
[
|
162 |
+
("system", system_prompt),
|
163 |
+
MessagesPlaceholder(variable_name="messages"),
|
164 |
+
(
|
165 |
+
"system",
|
166 |
+
"Based on the above conversation and instructions who should act next."
|
167 |
+
"Or should we FINISH?. Select one of: {options}.",
|
168 |
+
),
|
169 |
+
]
|
170 |
+
).partial(options=str(options),team_members=", ".join(members))
|
171 |
+
|
172 |
+
return (
|
173 |
+
prompt
|
174 |
+
| model.bind_tools(tools=[function_def], tool_choice="route")
|
175 |
+
| JsonOutputKeyToolsParser(key_name='route', first_tool_only=True, return_only_args=True)
|
176 |
+
)
|
177 |
+
|
178 |
+
# The following functions interoperate between the top level graph state
|
179 |
+
# and the state of the sub-graph
|
180 |
+
# this makes it so that the states of each graph don't get intermixed
|
181 |
+
def task1_enter_chain(state:TogafState, members: List[str]):
|
182 |
+
results = {
|
183 |
+
"messages": [SystemMessage(content=str(state))],
|
184 |
+
"team_members": ", ".join(members),
|
185 |
+
"business_query": state.get('business_query'),
|
186 |
+
"next": state.get('next'),
|
187 |
+
}
|
188 |
+
return results
|
189 |
+
|
190 |
+
def task2_enter_chain(state:TogafState, members: List[str]):
|
191 |
+
results = {
|
192 |
+
"messages": [SystemMessage(content=str(state))],
|
193 |
+
"team_members": ", ".join(members),
|
194 |
+
"business_query": state.get('business_query'),
|
195 |
+
"intent": state.get('intent'),
|
196 |
+
"stakeholder": state.get('stakeholder'),
|
197 |
+
"biz_capability": state.get('biz_capability'),
|
198 |
+
"requirement": state.get('requirement'),
|
199 |
+
"userjourney": state.get('userjourney'),
|
200 |
+
"next": state.get('next')
|
201 |
+
}
|
202 |
+
return results
|
203 |
+
|
204 |
+
def task3_enter_chain(state:TogafState, members: List[str]):
|
205 |
+
results = {
|
206 |
+
"messages": [SystemMessage(content=str(state))],
|
207 |
+
"team_members": ", ".join(members),
|
208 |
+
"business_query": state.get('business_query'),
|
209 |
+
"intent": state.get('intent'),
|
210 |
+
"stakeholder": state.get('stakeholder'),
|
211 |
+
"biz_capability": state.get('biz_capability'),
|
212 |
+
"requirement": state.get('requirement'),
|
213 |
+
"userjourney": state.get('userjourney'),
|
214 |
+
"landscape_asis": state.get('landscape_asis'),
|
215 |
+
"identified_asis": state.get('identified_asis'),
|
216 |
+
"landscape_gap": state.get('landscape_gap'),
|
217 |
+
"next": state.get('next'),
|
218 |
+
}
|
219 |
+
return results
|
220 |
+
|
221 |
+
def get_last_message(state: TogafState) -> TogafState:
|
222 |
+
results = {}
|
223 |
+
results['messages'] = [state.get("messages")[-1].content]
|
224 |
+
results['next'] = state.get('next')
|
225 |
+
if state.get('business_query'):
|
226 |
+
results['business_query'] = state.get('business_query')
|
227 |
+
if state.get('principles'):
|
228 |
+
results['principles'] = state.get('principles')
|
229 |
+
if state.get('intent'):
|
230 |
+
results['intent'] = state.get('intent')
|
231 |
+
if state.get('stakeholder'):
|
232 |
+
results['stakeholder'] = state.get('stakeholder')
|
233 |
+
if state.get('biz_capability'):
|
234 |
+
results['biz_capability'] = state.get('biz_capability')
|
235 |
+
if state.get('requirement'):
|
236 |
+
results['requirement'] = state.get('requirement')
|
237 |
+
if state.get('userjourney'):
|
238 |
+
results['userjourney'] = state.get('userjourney')
|
239 |
+
if state.get('landscape_asis'):
|
240 |
+
results['landscape_asis'] = state.get('landscape_asis')
|
241 |
+
if state.get('identified_asis'):
|
242 |
+
results['identified_asis'] = state.get('identified_asis')
|
243 |
+
if state.get('landscape_gap'):
|
244 |
+
results['landscape_gap'] = state.get('landscape_gap')
|
245 |
+
if state.get('vision_target'):
|
246 |
+
results['vision_target'] = state.get('vision_target')
|
247 |
+
if state.get('architecture_runway'):
|
248 |
+
results['architecture_runway'] = state.get('architecture_runway')
|
249 |
+
|
250 |
+
return results
|
251 |
+
|
252 |
+
def join_graph(state: TogafState) -> TogafState:
|
253 |
+
results = {}
|
254 |
+
results['messages'] = [state.get("messages")[-1]]
|
255 |
+
results['next'] = state.get('next')
|
256 |
+
if state.get('business_query'):
|
257 |
+
results['business_query'] = state.get('business_query')
|
258 |
+
if state.get('principles'):
|
259 |
+
results['principles'] = state.get('principles')
|
260 |
+
if state.get('intent'):
|
261 |
+
results['intent'] = state.get('intent')
|
262 |
+
if state.get('stakeholder'):
|
263 |
+
results['stakeholder'] = state.get('stakeholder')
|
264 |
+
if state.get('biz_capability'):
|
265 |
+
results['biz_capability'] = state.get('biz_capability')
|
266 |
+
if state.get('requirement'):
|
267 |
+
results['requirement'] = state.get('requirement')
|
268 |
+
if state.get('userjourney'):
|
269 |
+
results['userjourney'] = state.get('userjourney')
|
270 |
+
if state.get('landscape_asis'):
|
271 |
+
results['landscape_asis'] = state.get('landscape_asis')
|
272 |
+
if state.get('identified_asis'):
|
273 |
+
results['identified_asis'] = state.get('identified_asis')
|
274 |
+
if state.get('landscape_gap'):
|
275 |
+
        results['landscape_gap'] = state.get('landscape_gap')
|
276 |
+
if state.get('vision_target'):
|
277 |
+
results['vision_target'] = state.get('vision_target')
|
278 |
+
if state.get('architecture_runway'):
|
279 |
+
results['architecture_runway'] = state.get('architecture_runway')
|
280 |
+
|
281 |
+
return results
|
282 |
+
|
283 |
+
##Refactored to use Command instead of conditional_edge
|
284 |
+
async def business_query_grader(state:TogafState, config:RunnableConfig) -> Command[Literal["togaf_supervisor", "return"]]:
|
285 |
+
print(f"--- TOGAF AGENTIC team --- safety/quality review of the user requirement: {state['messages'][-1].content}")
|
286 |
+
business_query = state.get('business_query')
|
287 |
+
|
288 |
+
#if len(business_query) < 50:
|
289 |
+
# return Command(
|
290 |
+
# # state update
|
291 |
+
# update={"query_status": False},
|
292 |
+
# # control flow
|
293 |
+
# goto="return",
|
294 |
+
# )
|
295 |
+
|
296 |
+
# Prompt
|
297 |
+
grader_prompt = hub.pull('learn-it-all-do-it-all/ea4all_business_query_grader')
|
298 |
+
|
299 |
+
# Set up a parser:
|
300 |
+
#parser = PydanticOutputParser(pydantic_object=GradeBusinessQueryAnswer)
|
301 |
+
#grader_prompt = grader_prompt.partial(
|
302 |
+
# format_instructions=parser.get_format_instructions(),
|
303 |
+
# ai_output = LLAMA31_PROMPT_FORMAT,
|
304 |
+
#)
|
305 |
+
|
306 |
+
# Get any user-provided configs - LLM model in use
|
307 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
308 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
309 |
+
|
310 |
+
grader = grader_prompt | model
|
311 |
+
|
312 |
+
response = await grader.ainvoke(
|
313 |
+
{"business_query": state.get('business_query')}
|
314 |
+
)
|
315 |
+
|
316 |
+
binary_score = clean_and_load_json(extract_response_from_backticks(response.content))['binary_score']
|
317 |
+
|
318 |
+
messages = [
|
319 |
+
HumanMessage(content=state.get('business_query')),
|
320 |
+
]
|
321 |
+
|
322 |
+
if binary_score == "yes":
|
323 |
+
return Command(
|
324 |
+
# state update
|
325 |
+
update={"query_status": True, "messages": messages},
|
326 |
+
# control flow
|
327 |
+
goto="togaf_supervisor",
|
328 |
+
)
|
329 |
+
else:
|
330 |
+
return Command(
|
331 |
+
# state update
|
332 |
+
update={"query_status": False},
|
333 |
+
# control flow
|
334 |
+
goto="return",
|
335 |
+
)
|
336 |
+
|
337 |
+
def return_2user(state:TogafState):
|
338 |
+
message = '{"binary_score":"no"}'
|
339 |
+
|
340 |
+
return {
|
341 |
+
"messages": [AIMessage(content=str(message), name="return")],
|
342 |
+
"next": "end",
|
343 |
+
}
|
344 |
+
|
345 |
+
async def enter_graph(state:TogafState, config: RunnableConfig) -> dict:
|
346 |
+
|
347 |
+
print(f"--- Entered TOGAF AGENTIC team to --- {state['messages'][-1].content}")
|
348 |
+
#if isinstance(state, dict):
|
349 |
+
# user_feedback = state.get('user_feedback') if state.get('user_feedback') else state['messages'][-1].content
|
350 |
+
#else:
|
351 |
+
# user_feedback = getattr(state,'user_feedback', state['messages'][-1].content)
|
352 |
+
|
353 |
+
#busines_query = load_mock_content(state.get('user_feedback')),
|
354 |
+
|
355 |
+
return {**state, "business_query": state['messages'][-1].content}
|
356 |
+
|
357 |
+
## TOGAF Orchestrator Graph
|
358 |
+
task1_business_query_chain = (
|
359 |
+
functools.partial(task1_enter_chain, members=task1_graph.nodes) |
|
360 |
+
task1_graph
|
361 |
+
)
|
362 |
+
|
363 |
+
task2_assess_asis_chain = (
|
364 |
+
functools.partial(task2_enter_chain, members=task2_graph.nodes)
|
365 |
+
| task2_graph
|
366 |
+
)
|
367 |
+
|
368 |
+
task3_vision_target_chain = (
|
369 |
+
functools.partial(task3_enter_chain, members=task3_graph.nodes)
|
370 |
+
| task3_graph
|
371 |
+
)
|
372 |
+
|
373 |
+
# Define the graph.
|
374 |
+
workflow = StateGraph(TogafState, config_schema=AgentConfiguration)
|
375 |
+
# First add the nodes, which will do the work
|
376 |
+
workflow.add_node("enter_graph", enter_graph)
|
377 |
+
workflow.add_node("ask_human", togaf_ask_human)
|
378 |
+
workflow.add_node("query_grader", business_query_grader)
|
379 |
+
workflow.add_node("togaf_supervisor", create_team_supervisor)
|
380 |
+
workflow.add_node("assess_query", get_last_message | task1_business_query_chain | join_graph)
|
381 |
+
workflow.add_node("assess_asis", get_last_message | task2_assess_asis_chain | join_graph)
|
382 |
+
workflow.add_node("generate_tobe", get_last_message | task3_vision_target_chain | join_graph)
|
383 |
+
workflow.add_node("return", return_2user)
|
384 |
+
|
385 |
+
# Define the graph connections, which controls how the logic
|
386 |
+
# propagates through the program
|
387 |
+
workflow.add_conditional_edges(
|
388 |
+
"togaf_supervisor",
|
389 |
+
lambda x: x["next"],
|
390 |
+
{
|
391 |
+
"AssessBusinessQuery": "assess_query",
|
392 |
+
"AssessLandscape": "assess_asis",
|
393 |
+
"GenerateToBe": "generate_tobe",
|
394 |
+
"FINISH": END,
|
395 |
+
},
|
396 |
+
)
|
397 |
+
|
398 |
+
workflow.add_edge("enter_graph", "ask_human")
|
399 |
+
workflow.add_edge("ask_human", "query_grader")
|
400 |
+
workflow.add_edge("assess_query", "togaf_supervisor")
|
401 |
+
workflow.add_edge("assess_asis", "togaf_supervisor")
|
402 |
+
workflow.add_edge("generate_tobe", "togaf_supervisor")
|
403 |
+
workflow.add_edge("return", END)
|
404 |
+
|
405 |
+
workflow.set_entry_point("enter_graph")
|
406 |
+
|
407 |
+
#memory = MemorySaver()
|
408 |
+
togaf_graph = workflow.compile() #checkpointer=memory)
|
409 |
+
togaf_graph.name = "Togaf_reference_architecture_graph"
|
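For orientation, a sketch of invoking the compiled graph (the requirement text is a placeholder; in the app the entry message comes from the EA4ALL frontend, and with ea4all_ask_human at its "Frontend" default the graph reads the business requirement from the last message rather than interrupting):

    import asyncio
    from langchain_core.messages import HumanMessage

    async def run_togaf_demo():
        result = await togaf_graph.ainvoke(
            {"messages": [HumanMessage(content="Enable online mortgage applications for retail customers.")]}
        )
        return result["messages"][-1].content

    # asyncio.run(run_togaf_demo())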
ea4all/src/ea4all_gra/state.py
ADDED
@@ -0,0 +1,151 @@
1 |
+
from pydantic import Field
|
2 |
+
from typing_extensions import (
|
3 |
+
Annotated
|
4 |
+
)
|
5 |
+
import operator
|
6 |
+
from typing import (
|
7 |
+
Optional,
|
8 |
+
Annotated,
|
9 |
+
Sequence,
|
10 |
+
List
|
11 |
+
)
|
12 |
+
from dataclasses import dataclass, field
|
13 |
+
from typing import Optional
|
14 |
+
|
15 |
+
from langchain_core.messages import (
|
16 |
+
BaseMessage,
|
17 |
+
)
|
18 |
+
|
19 |
+
from langgraph.graph import MessagesState
|
20 |
+
|
21 |
+
from ea4all.src.ea4all_gra.data import (
|
22 |
+
ListRequirement,
|
23 |
+
ListObjective,
|
24 |
+
UserJourney,
|
25 |
+
StakeholderList,
|
26 |
+
BusinessCapability,
|
27 |
+
LandscapeAsIs,
|
28 |
+
CapabilityGap,
|
29 |
+
Principles
|
30 |
+
)
|
31 |
+
|
32 |
+
# Optional, the InputState is a restricted version of the State that is used to
|
33 |
+
# define a narrower interface to the outside world vs. what is maintained
|
34 |
+
# internally.
|
35 |
+
@dataclass(kw_only=True)
|
36 |
+
class InputState:
|
37 |
+
"""Represents the input state for the agent.
|
38 |
+
|
39 |
+
This class defines the structure of the input state, which includes
|
40 |
+
the messages exchanged between the user and the agent. It serves as
|
41 |
+
a restricted version of the full State, providing a narrower interface
|
42 |
+
to the outside world compared to what is maintained internally.
|
43 |
+
"""
|
44 |
+
|
45 |
+
"""Attributes:
|
46 |
+
business_query: a business requirement is the starting point of the TOGAF process
|
47 |
+
"""
|
48 |
+
business_query: str
|
49 |
+
|
50 |
+
# Task-2 Graph State
|
51 |
+
@dataclass(kw_only=True)
|
52 |
+
class Task2State(InputState):
|
53 |
+
"""
|
54 |
+
Represents the state of our graph.
|
55 |
+
|
56 |
+
Attributes:
|
57 |
+
message: a message is added after each team member finishes
|
58 |
+
team_members: the team members are tracked so they are aware of the others' skill-sets
|
59 |
+
next: used to route work. The supervisor calls a function that will update this every time it makes a decision
|
60 |
+
business_query: identified business capabilities
|
61 |
+
landscape_asis: list of applications
|
62 |
+
identified_asis: LLM generation
|
63 |
+
capability: list of business capabilities required to support intent and requirements
|
64 |
+
landscape_gap: business capability support gap
|
65 |
+
"""
|
66 |
+
|
67 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
68 |
+
team_members: Optional[List[str]] = None
|
69 |
+
business_query: Optional[str] = None
|
70 |
+
landscape_asis: Optional[List[str]] = None
|
71 |
+
identified_asis: Optional[LandscapeAsIs] = None
|
72 |
+
biz_capability: Optional[BusinessCapability] = None
|
73 |
+
landscape_gap: Optional[CapabilityGap] = None
|
74 |
+
next: Optional[str] = None
|
75 |
+
|
76 |
+
# Task-3 Graph State
|
77 |
+
@dataclass(kw_only=True)
|
78 |
+
class Task3State(InputState):
|
79 |
+
"""
|
80 |
+
Represents the Reference Architecture state of our graph.
|
81 |
+
|
82 |
+
Attributes:
|
83 |
+
message: a message is added after each team member finishes
|
84 |
+
team_members: the team members are tracked so they are aware of the others' skill-sets
|
85 |
+
next: used to route work. The supervisor calls a function that will update this every time it makes a decision
|
86 |
+
business_query: business demand to be delivered
|
87 |
+
principles: list of principles to the architecture work
|
88 |
+
requirement: list of business requirements
|
89 |
+
intent: business objective, goal
|
90 |
+
user_journey: list of user journeys
|
91 |
+
stakeholder: list of stakeholder and their concerns
|
92 |
+
capability: list of business capabilities to deliver intent and requirements
|
93 |
+
landscape_asis: list of potential applications to support business query
|
94 |
+
identified_asis: identified target applications
|
95 |
+
landscape_gap: list of capabilities not supported by as-is landscape
|
96 |
+
"""
|
97 |
+
|
98 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
99 |
+
team_members: Optional[List[str]] = None
|
100 |
+
stakeholder: Optional[StakeholderList] = None
|
101 |
+
principles: Optional[Principles] = None
|
102 |
+
requirement: Optional[ListRequirement] = None
|
103 |
+
intent: Optional[ListObjective] = None
|
104 |
+
userjourney: Optional[UserJourney] = None
|
105 |
+
biz_capability: Optional[BusinessCapability] = None
|
106 |
+
landscape_asis: Optional[List[str]] = None
|
107 |
+
identified_asis: Optional[LandscapeAsIs] = None
|
108 |
+
landscape_gap: Optional[CapabilityGap] = None
|
109 |
+
vision_target: Optional[str] = None
|
110 |
+
architecture_runway: Optional[str] = None
|
111 |
+
next: Optional[str] = None
|
112 |
+
|
113 |
+
# Top-level graph state
|
114 |
+
# TogafState extends MessagesState (a TypedDict), so no @dataclass decorator is applied here
|
115 |
+
class TogafState(MessagesState):
|
116 |
+
"""Represents the state of a Togaf system."""
|
117 |
+
|
118 |
+
"""
|
119 |
+
Attributes:
|
120 |
+
- user_feedback: used to capture additional information needed from the user by the graph
|
121 |
+
- business_query: a business requirement is the starting point of the TOGAF process
|
122 |
+
- query_status (Optional[bool]): Indicates the status of the query. Default value is False.
|
123 |
+
- messages (Optional[Annotated[list[AnyMessage], add_messages]]): A list of messages associated with the state.
|
124 |
+
- stakeholder (Optional[StakeholderList]): Represents the list of stakeholders.
|
125 |
+
- principles (Optional[Principles]): Represents the principles of the Togaf system.
|
126 |
+
- requirement (Optional[ListRequirement]): Represents the list of requirements.
|
127 |
+
- intent (Optional[ListObjective]): Represents the list of objectives.
|
128 |
+
- userjourney (Optional[UserJourney]): Represents the user journey of the Togaf system.
|
129 |
+
- biz_capability (Optional[BusinessCapability]): Represents the business capability of the Togaf system.
|
130 |
+
- landscape_asis (Optional[List[str]]): Represents the list of landscape as-is.
|
131 |
+
- identified_asis (Optional[LandscapeAsIs]): Represents the identified landscape as-is.
|
132 |
+
- landscape_gap (Optional[CapabilityGap]): Represents the capability gap of the landscape.
|
133 |
+
- vision_target (Optional[str]): Represents the vision target of the Togaf system.
|
134 |
+
- architecture_runway (Optional[str]): Represents the architecture runway of the Togaf system.
|
135 |
+
- next (Optional[str]): Represents the next step in the Togaf system.
|
136 |
+
"""
|
137 |
+
|
138 |
+
business_query: Optional[str] = None
|
139 |
+
query_status: Optional[bool] = field(default=False)
|
140 |
+
stakeholder: Optional[StakeholderList] = None
|
141 |
+
principles: Optional[Principles] = None
|
142 |
+
requirement: Optional[ListRequirement] = None
|
143 |
+
intent: Optional[ListObjective] = None
|
144 |
+
userjourney: Optional[UserJourney] = None
|
145 |
+
biz_capability: Optional[BusinessCapability] = None
|
146 |
+
landscape_asis: Optional[List[str]] = None
|
147 |
+
identified_asis: Optional[LandscapeAsIs] = None
|
148 |
+
landscape_gap: Optional[CapabilityGap] = None
|
149 |
+
vision_target: Optional[str] = None
|
150 |
+
architecture_runway: Optional[str] = None
|
151 |
+
next: Optional[str] = None
|
ea4all/src/ea4all_gra/togaf_task1/graph.py
ADDED
@@ -0,0 +1,125 @@
1 |
+
#core libraries
|
2 |
+
from langchain_core.runnables import RunnableConfig
|
3 |
+
from langchain_core.messages import (
|
4 |
+
AIMessage
|
5 |
+
)
|
6 |
+
|
7 |
+
from langgraph.graph import (
|
8 |
+
END,
|
9 |
+
StateGraph,
|
10 |
+
)
|
11 |
+
|
12 |
+
from ea4all.src.ea4all_gra.configuration import AgentConfiguration
|
13 |
+
from ea4all.src.ea4all_gra.data import (
|
14 |
+
ListRequirement,
|
15 |
+
ListObjective,
|
16 |
+
BusinessCapability,
|
17 |
+
StakeholderList,
|
18 |
+
UserJourney,
|
19 |
+
)
|
20 |
+
from ea4all.src.shared.utils import (
|
21 |
+
get_llm_client,
|
22 |
+
extract_detailed_business_requirements,
|
23 |
+
)
|
24 |
+
from ea4all.src.shared.prompts import LLAMA31_PROMPT_FORMAT
|
25 |
+
|
26 |
+
from ea4all.src.ea4all_gra.togaf_task1.state import Task1State
|
27 |
+
|
28 |
+
#EXECUTE STEP-1: Identify Business Requirements, Objectives, Capabilities, Stakeholders and Journey Agent
|
29 |
+
def assess_business_query(state: Task1State, config: RunnableConfig):
|
30 |
+
"""Identified business requirements, goals, use cases, user journey, stakeholder and business capability from a given business query."""
|
31 |
+
#DEFINE agent template & prompt
|
32 |
+
#BROKE-DOWN BusinessInput into individual extractions: LLAMA-3 CONTEXT WINDOW limitation
|
33 |
+
#REMOVED parser from the chain: LLAMA-3 returning text + ```BusinessInput```
|
34 |
+
##Parser back to chain 2024-10-13
|
35 |
+
    #Setting streaming=True makes the model produce wrong output
|
36 |
+
query = getattr(state, "business_query")
|
37 |
+
|
38 |
+
# Get any user-provided configs - LLM model in use
|
39 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
40 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
41 |
+
|
42 |
+
values = {"business_input": query}
|
43 |
+
final_response=[]
|
44 |
+
|
45 |
+
##EXTRACT BUSINESS REQUIREMENT
|
46 |
+
response = extract_detailed_business_requirements(model, ListRequirement, "business requirement", values)
|
47 |
+
business_reqs = ""
|
48 |
+
try:
|
49 |
+
for item in response.requirements:
|
50 |
+
business_reqs += ':'.join([item.category, item.requirement.lower()]) + ";"
|
51 |
+
final_response += [response.requirements]
|
52 |
+
except Exception as e:
|
53 |
+
print(f"Houston, we a {e} thorny problem!")
|
54 |
+
|
55 |
+
##EXTRACT BUSINESS OBJECTIVE
|
56 |
+
response = extract_detailed_business_requirements(model, ListObjective, "business objective", values)
|
57 |
+
business_goal=[]
|
58 |
+
try:
|
59 |
+
for item in response.objectives:
|
60 |
+
business_goal.append(item.objective)
|
61 |
+
final_response += [response.objectives]
|
62 |
+
except Exception as e:
|
63 |
+
print(f"Houston, we a {e} thorny problem!")
|
64 |
+
|
65 |
+
|
66 |
+
##EXTRACT BUSINESS CAPABILITY
|
67 |
+
response = extract_detailed_business_requirements(model, BusinessCapability, "business capabilities", values)
|
68 |
+
business_capabilities=[]
|
69 |
+
try:
|
70 |
+
for item in response.capabilities:
|
71 |
+
business_capabilities.append(item.capability)
|
72 |
+
final_response += [response.capabilities]
|
73 |
+
except Exception as e:
|
74 |
+
print(f"Houston, we a {e} thorny problem!")
|
75 |
+
|
76 |
+
|
77 |
+
##EXTRACT STAKEHOLDER
|
78 |
+
response = extract_detailed_business_requirements(model, StakeholderList, "business stakeholder", values)
|
79 |
+
business_stakeholder = ""
|
80 |
+
try:
|
81 |
+
for item in response.stakeholders:
|
82 |
+
business_stakeholder += ' '.join([item.stakeholder,item.role.lower(), item.concern]) + "."
|
83 |
+
final_response += [response.stakeholders]
|
84 |
+
except Exception as e:
|
85 |
+
print(f"Houston, we a {e} thorny problem!")
|
86 |
+
|
87 |
+
|
88 |
+
##EXTRACT BUSINESS USER JOURNEY
|
89 |
+
response = extract_detailed_business_requirements(model, UserJourney, "user journey", values)
|
90 |
+
user_journey = ""
|
91 |
+
try:
|
92 |
+
for item in response.userjourney:
|
93 |
+
user_journey += ':'.join([item.persona,item.step.lower()]) + ","
|
94 |
+
final_response += [response.userjourney]
|
95 |
+
except Exception as e:
|
96 |
+
print(f"Houston, we a {e} thorny problem!")
|
97 |
+
|
98 |
+
name = getattr(state, "next")
|
99 |
+
|
100 |
+
return {
|
101 |
+
"messages": [AIMessage(content=str(final_response), name=name)],
|
102 |
+
"requirement": business_reqs,
|
103 |
+
"intent": str(business_goal),
|
104 |
+
"stakeholder": business_stakeholder,
|
105 |
+
"userjourney": user_journey,
|
106 |
+
"biz_capability": str(business_capabilities)
|
107 |
+
}
|
108 |
+
|
109 |
+
# Build graphs task1
|
110 |
+
## TASK-1 Graph
|
111 |
+
task1_builder = StateGraph(Task1State)
|
112 |
+
|
113 |
+
# Define the nodes
|
114 |
+
task1_builder.add_node("AssessBusinessQuery", assess_business_query) # assess business input
|
115 |
+
|
116 |
+
# Build graph
|
117 |
+
task1_builder.add_edge("AssessBusinessQuery", END)
|
118 |
+
task1_builder.set_entry_point("AssessBusinessQuery")
|
119 |
+
|
120 |
+
# Set FINISH node end-point
|
121 |
+
task1_builder.set_finish_point('AssessBusinessQuery')
|
122 |
+
|
123 |
+
# Compile
|
124 |
+
task1_graph = task1_builder.compile()
|
125 |
+
task1_graph.name = "togaf_assess_business_query_graph"
|
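A minimal usage sketch (not part of the commit) for the compiled Task-1 graph above. It assumes the inherited InputState exposes the business_query field read by assess_business_query, and that model settings are passed through the RunnableConfig "configurable" mapping; the model name and query text are illustrative.

from ea4all.src.ea4all_gra.togaf_task1.graph import task1_graph

# Hypothetical invocation: field names mirror what assess_business_query reads and returns.
result = task1_graph.invoke(
    {"business_query": "Launch a customer self-service portal", "next": "Task1"},
    config={"configurable": {"togaf_model": "gpt-4o-mini"}},
)
print(result["requirement"])   # "category:requirement;" pairs extracted by the LLM
print(result["stakeholder"])   # stakeholder, role and concern summary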
ea4all/src/ea4all_gra/togaf_task1/state.py
ADDED
@@ -0,0 +1,53 @@
|
1 |
+
from pydantic import Field
|
2 |
+
from typing_extensions import (
|
3 |
+
Annotated
|
4 |
+
)
|
5 |
+
import operator
|
6 |
+
from typing import (
|
7 |
+
Optional,
|
8 |
+
Annotated,
|
9 |
+
Sequence,
|
10 |
+
List
|
11 |
+
)
|
12 |
+
from dataclasses import dataclass
|
13 |
+
from typing import Optional
|
14 |
+
|
15 |
+
from langchain_core.messages import (
|
16 |
+
BaseMessage,
|
17 |
+
)
|
18 |
+
|
19 |
+
from ea4all.src.ea4all_gra.data import (
|
20 |
+
ListRequirement,
|
21 |
+
ListObjective,
|
22 |
+
UserJourney,
|
23 |
+
StakeholderList,
|
24 |
+
BusinessCapability,
|
25 |
+
)
|
26 |
+
|
27 |
+
from ea4all.src.ea4all_gra.state import InputState
|
28 |
+
|
29 |
+
# Task-1 Graph State
|
30 |
+
@dataclass(kw_only=True)
|
31 |
+
class Task1State(InputState):
|
32 |
+
"""
|
33 |
+
Represents the BusinessOutput state of our graph.
|
34 |
+
|
35 |
+
Attributes:
|
36 |
+
message: a message is added after each team member finishes
|
37 |
+
team_members: the team members are tracked so they are aware of the others' skill-sets
|
38 |
+
next: used to route work. The supervisor calls a function that will update this every time it makes a decision
|
39 |
+
requirement: list of business requirements
|
40 |
+
intent: business objective, goal
|
41 |
+
userjourney: list of user journeys
|
42 |
+
stakeholder: list of stakeholder and their concerns
|
43 |
+
capability: list of business capabilities to deliver intent and requirements
|
44 |
+
"""
|
45 |
+
|
46 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
47 |
+
team_members: Optional[List[str]] = None
|
48 |
+
requirement: Optional[ListRequirement] = None
|
49 |
+
intent: Optional[ListObjective] = None
|
50 |
+
userjourney: Optional[UserJourney] = None
|
51 |
+
stakeholder: Optional[StakeholderList] = Field(description="List of stakeholder", default="Not identified")
|
52 |
+
biz_capability: Optional[BusinessCapability] = None
|
53 |
+
next: Optional[str] = None
|
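A small sketch (not part of the commit) showing how Task1State can be constructed directly, for example when unit-testing assess_business_query; business_query is assumed to be inherited from InputState.

from ea4all.src.ea4all_gra.togaf_task1.state import Task1State

state = Task1State(
    business_query="Enable online appointment booking for clinics",  # assumed InputState field
    next="Task1",
)
print(state.requirement)  # None until assess_business_query populates it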
ea4all/src/ea4all_gra/togaf_task2/graph.py
ADDED
@@ -0,0 +1,441 @@
|
1 |
+
import ast
|
2 |
+
|
3 |
+
#core libraries
|
4 |
+
from langchain_core.runnables import RunnableConfig
|
5 |
+
from langchain_core.messages import (
|
6 |
+
AIMessage,
|
7 |
+
)
|
8 |
+
from langchain_core.output_parsers import PydanticOutputParser
|
9 |
+
from langchain_core.prompts.chat import ChatPromptTemplate
|
10 |
+
|
11 |
+
from langchain import hub
|
12 |
+
|
13 |
+
from langgraph.graph import (
|
14 |
+
END,
|
15 |
+
StateGraph,
|
16 |
+
)
|
17 |
+
|
18 |
+
from ea4all.src.ea4all_gra.configuration import AgentConfiguration
|
19 |
+
from ea4all.src.ea4all_gra.data import (
|
20 |
+
CapabilityGap,
|
21 |
+
GradeAnswer,
|
22 |
+
GradeDocuments,
|
23 |
+
LandscapeAsIs,
|
24 |
+
)
|
25 |
+
|
26 |
+
from ea4all.src.shared.utils import (
|
27 |
+
get_llm_client,
|
28 |
+
extract_structured_output,
|
29 |
+
extract_topic_from_business_input,
|
30 |
+
set_max_new_tokens,
|
31 |
+
get_predicted_num_tokens_from_prompt,
|
32 |
+
)
|
33 |
+
|
34 |
+
from ea4all.src.shared.prompts import (
|
35 |
+
LLAMA31_CHAT_PROMPT_FORMAT,
|
36 |
+
LLAMA31_PROMPT_FORMAT,
|
37 |
+
)
|
38 |
+
|
39 |
+
from ea4all.src.shared import vectorstore
|
40 |
+
|
41 |
+
from ea4all.src.ea4all_gra.togaf_task2.state import Task2State
|
42 |
+
|
43 |
+
from ea4all.src.ea4all_apm.graph import get_retrieval_chain
|
44 |
+
from ea4all.src.ea4all_apm import configuration as apm_config
|
45 |
+
|
46 |
+
# Retrieval grader: scores whether the retrieved IT landscape addresses the business query
|
47 |
+
def retrieval_grader(model):
|
48 |
+
# LLM with function call
|
49 |
+
structured_llm_grader = model.with_structured_output(GradeDocuments)
|
50 |
+
|
51 |
+
#Prompt
|
52 |
+
system = """You are an enterprise architect grader assessing relevance of applications to address a business query.
|
53 |
+
It does not need to be a stringent test. The objective is to filter out erroneous retrievals.
|
54 |
+
If the application contains any keyword or semantic meaning related to the business query, grade it as relevant.
|
55 |
+
Give a binary score 'yes' or 'no' to indicate whether the application is relevant to the business query."""
|
56 |
+
|
57 |
+
grade_prompt = ChatPromptTemplate.from_messages(
|
58 |
+
[
|
59 |
+
("system", system),
|
60 |
+
("ai", "Retrieved applications: \n\n {landscape_asis} \n\n Business Query: {business_query}"),
|
61 |
+
]
|
62 |
+
)
|
63 |
+
|
64 |
+
grader = grade_prompt | structured_llm_grader
|
65 |
+
|
66 |
+
return grader
|
67 |
+
|
68 |
+
# Business capability needs vs landscape as-is gap analysis
|
69 |
+
def gap_grader(model):
|
70 |
+
|
71 |
+
gap_prompt = hub.pull("learn-it-all-do-it-all/ea4all_togaf_capability_gap")
|
72 |
+
|
73 |
+
# Set up a parser:
|
74 |
+
parser = PydanticOutputParser(pydantic_object=CapabilityGap)
|
75 |
+
gap_prompt = gap_prompt.partial(
|
76 |
+
format_instructions=parser.get_format_instructions(),
|
77 |
+
)
|
78 |
+
|
79 |
+
capability_gap_grader = gap_prompt | model | parser
|
80 |
+
|
81 |
+
return capability_gap_grader
|
82 |
+
|
83 |
+
## Question Re-writer
|
84 |
+
def question_rewriter(model):
|
85 |
+
# Rewriter prompt
|
86 |
+
rewrite_prompt = hub.pull("learn-it-all-do-it-all/ea4all_question_rewriter")
|
87 |
+
rewrite_prompt = rewrite_prompt.partial(ai_output=LLAMA31_CHAT_PROMPT_FORMAT)
|
88 |
+
|
89 |
+
rewriter = rewrite_prompt | model
|
90 |
+
|
91 |
+
return rewriter
|
92 |
+
|
93 |
+
##Answer grader: scores whether the RAG + LLM answer addresses the business query
|
94 |
+
def answer_grader():
|
95 |
+
# Prompt
|
96 |
+
answer_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_answer_grade')
|
97 |
+
|
98 |
+
# Set up a parser:
|
99 |
+
parser = PydanticOutputParser(pydantic_object=GradeAnswer)
|
100 |
+
answer_prompt = answer_prompt.partial(
|
101 |
+
format_instructions=parser.get_format_instructions(),
|
102 |
+
ai_output = LLAMA31_PROMPT_FORMAT
|
103 |
+
)
|
104 |
+
|
105 |
+
return answer_prompt
|
106 |
+
|
107 |
+
## Hallucination grader: scores whether there is any hallucination between the RAG and LLM answers
|
108 |
+
def hallucination_grader(asis, identified):
|
109 |
+
# Prompt": REVISED TO WORK WIHT LLAMA-3 - issue w/ multi-word app
|
110 |
+
#changes on prompting e.g. role, rules and restrictions, explicit instructions, change from word to application(s)
|
111 |
+
#changed to one-by-one assessment using single text search
|
112 |
+
grader_false = []
|
113 |
+
for d in identified:
|
114 |
+
if d.lower() not in asis.lower():
|
115 |
+
grader_false.append(d)
|
116 |
+
|
117 |
+
return grader_false
|
118 |
+
|
119 |
+
##Action-1 RAG retrieval - Assess-AsIs-Landscape
|
120 |
+
async def retrieve(state:Task2State, config: RunnableConfig):
|
121 |
+
"""
|
122 |
+
Retrieve applications
|
123 |
+
|
124 |
+
Args:
|
125 |
+
state (dict): The current graph state
|
126 |
+
|
127 |
+
Returns:
|
128 |
+
state (dict): New key added to state, applications, that contains retrieved identified applications
|
129 |
+
"""
|
130 |
+
|
131 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
132 |
+
|
133 |
+
print("---RETRIEVE---")
|
134 |
+
if getattr(state, 'landscape_asis'):
|
135 |
+
business_query = state.business_query
|
136 |
+
else:
|
137 |
+
#Generate first business query - capture business input
|
138 |
+
try:
|
139 |
+
content = ast.literal_eval(state.messages[-1].content)
|
140 |
+
intent = content['intent'].lower()[1:-1].replace("'","")
|
141 |
+
except Exception as e:
|
142 |
+
# Falls here if the content is not a dict: Transform Query flow
|
143 |
+
intent = state.messages[-1].content
|
144 |
+
business_query=f"""What existent applications can be re-used {intent}?"""
|
145 |
+
|
146 |
+
# Retrieval
|
147 |
+
rag_input = 5
|
148 |
+
#faiss_index = set_faiss_index(config)
|
149 |
+
with vectorstore.make_retriever(config) as _retriever:
|
150 |
+
retriever = _retriever
|
151 |
+
|
152 |
+
retrieval = await get_retrieval_chain(rag_input,"ea4all_agent",business_query,retriever, config)
|
153 |
+
|
154 |
+
landscape_asis = await retrieval.ainvoke(
|
155 |
+
{"standalone_question": business_query},
|
156 |
+
config={"recursion_limit":configuration.ea4all_recursion_limit})
|
157 |
+
|
158 |
+
name = state.next
|
159 |
+
|
160 |
+
## return Document page_content
|
161 |
+
content = ';'.join(asis.page_content.strip() for asis in landscape_asis)
|
162 |
+
return {
|
163 |
+
"messages": [AIMessage(content=content, name=name)],
|
164 |
+
"landscape_asis": landscape_asis,
|
165 |
+
"business_query": business_query
|
166 |
+
}
|
167 |
+
|
168 |
+
##Action-2 Grade retrieval against business query, filter out not relevant applications
|
169 |
+
def grade_landscape_asis(state:Task2State, config: RunnableConfig):
|
170 |
+
"""
|
171 |
+
Determines whether an application is relevant to address a business query.
|
172 |
+
|
173 |
+
Args:
|
174 |
+
state (dict): The current graph state
|
175 |
+
|
176 |
+
Returns:
|
177 |
+
state (dict): Updates landscape_asis key with only filtered relevant applications
|
178 |
+
"""
|
179 |
+
|
180 |
+
print("---CHECK DOCUMENT RELEVANCE TO BUSINESS QUERY---")
|
181 |
+
business_query = getattr(state,'business_query')
|
182 |
+
landscape_asis = getattr(state,'landscape_asis')
|
183 |
+
|
184 |
+
# Score each doc
|
185 |
+
filtered_docs = []
|
186 |
+
for d in landscape_asis:
|
187 |
+
##Pick relevant Metadata
|
188 |
+
application = d.metadata['source']
|
189 |
+
capability = d.metadata['capability']
|
190 |
+
description = d.metadata['description']
|
191 |
+
business_fit = d.metadata['business fit']
|
192 |
+
roadmap = d.metadata['roadmap']
|
193 |
+
asis = f"Application:{application}; Capability:{capability}; Description:{description};Business fit: {business_fit}; Roadmap: {roadmap};"
|
194 |
+
|
195 |
+
filtered_docs.append(asis)
|
196 |
+
|
197 |
+
return {
|
198 |
+
#"messages": [AIMessage(content=str(filtered_docs), name=name)],
|
199 |
+
"business_query": business_query,
|
200 |
+
"landscape_asis": landscape_asis,
|
201 |
+
"identified_asis": filtered_docs
|
202 |
+
}
|
203 |
+
|
204 |
+
##Action-3 Are there relevant applications? If yes, generate; otherwise transform_query
|
205 |
+
def decide_to_generate(state:Task2State, config: RunnableConfig):
|
206 |
+
"""
|
207 |
+
Determines whether to generate an answer, or re-generate a question.
|
208 |
+
|
209 |
+
Args:
|
210 |
+
state (dict): The current graph state
|
211 |
+
|
212 |
+
Returns:
|
213 |
+
str: Binary decision for next node to call
|
214 |
+
"""
|
215 |
+
|
216 |
+
print("---ASSESS GRADED APPLICATIONS---")
|
217 |
+
filtered_applications = state.identified_asis
|
218 |
+
|
219 |
+
if not filtered_applications:
|
220 |
+
# All documents have been filtered check_relevance
|
221 |
+
# We will re-generate a new query
|
222 |
+
print(
|
223 |
+
"---DECISION: ALL APPLICATIONS ARE NOT RELEVANT TO BUSINESS QUERY, TRANSFORM QUERY---"
|
224 |
+
)
|
225 |
+
return "transform_query"
|
226 |
+
else:
|
227 |
+
# We have relevant documents, so generate answer
|
228 |
+
print("---DECISION: GENERATE---")
|
229 |
+
return "generate"
|
230 |
+
|
231 |
+
##Action-4a Generate if relevant applications found
|
232 |
+
def generate(state:Task2State, config: RunnableConfig):
|
233 |
+
"""
|
234 |
+
Generate answer
|
235 |
+
|
236 |
+
Args:
|
237 |
+
state (dict): The current graph state
|
238 |
+
|
239 |
+
Returns:
|
240 |
+
state (dict): New key
|
241 |
+
added to state, identified_asis, that contains LLM generation
|
242 |
+
"""
|
243 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
244 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
245 |
+
|
246 |
+
print("---GENERATE---")
|
247 |
+
landscape_asis = state.landscape_asis
|
248 |
+
|
249 |
+
values = {
|
250 |
+
"business_query": state.business_query,
|
251 |
+
"applications": state.identified_asis
|
252 |
+
}
|
253 |
+
|
254 |
+
parser = PydanticOutputParser(pydantic_object=LandscapeAsIs)
|
255 |
+
|
256 |
+
hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_landscape_business_query')
|
257 |
+
hub_prompt = hub_prompt.partial(
|
258 |
+
format_instructions=parser.get_format_instructions(),
|
259 |
+
)
|
260 |
+
|
261 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,hub_prompt,values))
|
262 |
+
|
263 |
+
task_2_generate = hub_prompt | model | parser
|
264 |
+
generated_asis = task_2_generate.invoke(input=values, config={"recursion_limit":configuration.recursion_limit})
|
265 |
+
|
266 |
+
name = state.next
|
267 |
+
|
268 |
+
return {
|
269 |
+
"messages": [AIMessage(content=str(generated_asis.identified_asis), name=name)],
|
270 |
+
"landscape_asis": landscape_asis,
|
271 |
+
"business_query": state.business_query,
|
272 |
+
"identified_asis": generated_asis.identified_asis
|
273 |
+
}
|
274 |
+
|
275 |
+
##Action-4b Re-write query otherwise
|
276 |
+
def transform_query(state:Task2State, config: RunnableConfig):
|
277 |
+
"""
|
278 |
+
Transform the query to produce a better question.
|
279 |
+
|
280 |
+
Args:
|
281 |
+
state (dict): The current graph state
|
282 |
+
|
283 |
+
Returns:
|
284 |
+
state (dict): Updates question key with a re-phrased question
|
285 |
+
"""
|
286 |
+
# Get any user-provided configs - LLM model in use
|
287 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
288 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
289 |
+
|
290 |
+
print("---TRANSFORM QUERY---")
|
291 |
+
business_query = state.business_query
|
292 |
+
|
293 |
+
# Re-write question
|
294 |
+
response = question_rewriter(model).invoke(
|
295 |
+
{"user_question": business_query, "target": "vectorstore"},
|
296 |
+
)
|
297 |
+
|
298 |
+
generated_question = extract_topic_from_business_input(response.content)
|
299 |
+
better_question = generated_question['rephrased']
|
300 |
+
|
301 |
+
if better_question is None: better_question = business_query
|
302 |
+
|
303 |
+
name = state.next
|
304 |
+
|
305 |
+
return {
|
306 |
+
"messages": [AIMessage(content=better_question, name=name)],
|
307 |
+
"business_query": better_question
|
308 |
+
}
|
309 |
+
|
310 |
+
##Action-5 Grade final response
|
311 |
+
def grade_generation_v_documents_and_question(state:Task2State, config: RunnableConfig):
|
312 |
+
"""
|
313 |
+
Determines whether the generation is grounded in the landscape_asis and answers business query.
|
314 |
+
|
315 |
+
Args:
|
316 |
+
state (dict): The current graph state
|
317 |
+
|
318 |
+
Returns:
|
319 |
+
str: Decision for next node to call
|
320 |
+
"""
|
321 |
+
|
322 |
+
# Get any user-provided configs - LLM model in use
|
323 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
324 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
325 |
+
|
326 |
+
|
327 |
+
print("---CHECK HALLUCINATIONS---")
|
328 |
+
business_query = state.business_query
|
329 |
+
landscape_asis = state.landscape_asis
|
330 |
+
identified_asis = state.identified_asis
|
331 |
+
generated_asis = [item.application for item in identified_asis]
|
332 |
+
|
333 |
+
score = hallucination_grader(str(landscape_asis),generated_asis)
|
334 |
+
|
335 |
+
if len(score)==0:
|
336 |
+
print("---DECISION: IDENTIFIED APPLICATION(s) IS GROUNDED IN LANDSCAPE ASIS---")
|
337 |
+
# Check question-answering
|
338 |
+
print("---GRADE GENERATION vs QUESTION---")
|
339 |
+
|
340 |
+
values = {"business_query": business_query, "application": identified_asis}
|
341 |
+
prompt = answer_grader()
|
342 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,prompt,values))
|
343 |
+
grader_chain = prompt | model
|
344 |
+
score = grader_chain.invoke(values)
|
345 |
+
extracted_answer = extract_structured_output(score.content)
|
346 |
+
grade = extracted_answer['binary_score'] ##REVIEW PROMPT W/ LLAMA3.1-70B
|
347 |
+
if grade == "yes":
|
348 |
+
print("---DECISION: APPLICATION ADDRESSES BUSINESS QUERY---")
|
349 |
+
return "useful"
|
350 |
+
else:
|
351 |
+
print("---DECISION: APPLICATION DOES NOT ADDRESS BUSINESS QUERY---")
|
352 |
+
return "not useful"
|
353 |
+
else:
|
354 |
+
print("---DECISION: IDENTIFIED ASIS IS NOT GROUNDED IN LANDSCAPE ASIS, RE-TRY---")
|
355 |
+
print(f"---HALLUCINATIONS: {score}---")
|
356 |
+
return "not supported"
|
357 |
+
|
358 |
+
##Action-6 Analyse gap between current state and the desired future state - identified capabilities
|
359 |
+
def grade_landscape_asis_v_capability_gap(state:Task2State, config: RunnableConfig):
|
360 |
+
"""
|
361 |
+
Analyse any gap between existent applications and identified business capability to address the business query.
|
362 |
+
|
363 |
+
Args:
|
364 |
+
state (dict): The current graph state
|
365 |
+
|
366 |
+
Returns:
|
367 |
+
state (dict): Updates landscape_gap key with capability gap status
|
368 |
+
"""
|
369 |
+
|
370 |
+
# Get any user-provided configs - LLM model in use
|
371 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
372 |
+
model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
|
373 |
+
|
374 |
+
print("---CHECK SUPPORT IDENTIFIED APP TO BUSINESS CAPABILITY---")
|
375 |
+
|
376 |
+
parser = PydanticOutputParser(pydantic_object=CapabilityGap)
|
377 |
+
|
378 |
+
hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_capability_gap')
|
379 |
+
hub_prompt = hub_prompt.partial(
|
380 |
+
format_instructions=parser.get_format_instructions(),
|
381 |
+
)
|
382 |
+
task_2_landscape_gap = hub_prompt | model | parser
|
383 |
+
|
384 |
+
#capability_gap_grader
|
385 |
+
content = ';'.join(str(app) for app in state.identified_asis)
|
386 |
+
|
387 |
+
values = {
|
388 |
+
"application": content,
|
389 |
+
"capability": state.biz_capability[1:-1].replace("'",""),
|
390 |
+
}
|
391 |
+
|
392 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,hub_prompt,values))
|
393 |
+
|
394 |
+
extracted_gap = task_2_landscape_gap.invoke(input=values, config={"recursion_limit":configuration.recursion_limit})
|
395 |
+
|
396 |
+
for item in extracted_gap.capability_status:
|
397 |
+
print(f"---CAPABILITY: {item.capability} SUPPORT: {item.support}---")
|
398 |
+
|
399 |
+
return {
|
400 |
+
"messages": [AIMessage(content=str(state.messages), name=state.next)],
|
401 |
+
"landscape_gap": extracted_gap #landscape_gap.content
|
402 |
+
}
|
403 |
+
|
404 |
+
##TASK-2 Graph
|
405 |
+
task2_builder = StateGraph(Task2State)
|
406 |
+
|
407 |
+
# Define the nodes
|
408 |
+
task2_builder.add_node("assess_landscape", retrieve) # retrieve
|
409 |
+
task2_builder.add_node("grade_landscape_asis", grade_landscape_asis) # grade documents
|
410 |
+
task2_builder.add_node("generate", generate) # generate
|
411 |
+
task2_builder.add_node("transform_query", transform_query) # transform_query
|
412 |
+
task2_builder.add_node("grade_landscape_gap", grade_landscape_asis_v_capability_gap) #analyse asis gap
|
413 |
+
|
414 |
+
# Build graph
|
415 |
+
task2_builder.set_entry_point("assess_landscape")
|
416 |
+
|
417 |
+
task2_builder.add_edge("assess_landscape", "grade_landscape_asis")
|
418 |
+
task2_builder.add_conditional_edges(
|
419 |
+
"grade_landscape_asis",
|
420 |
+
decide_to_generate,
|
421 |
+
{
|
422 |
+
"transform_query": "transform_query",
|
423 |
+
"generate": "generate",
|
424 |
+
},
|
425 |
+
)
|
426 |
+
task2_builder.add_edge("transform_query", "assess_landscape")
|
427 |
+
task2_builder.add_conditional_edges(
|
428 |
+
"generate",
|
429 |
+
grade_generation_v_documents_and_question,
|
430 |
+
{
|
431 |
+
"not supported": "generate",
|
432 |
+
"useful": "grade_landscape_gap",
|
433 |
+
"not useful": "transform_query",
|
434 |
+
},
|
435 |
+
)
|
436 |
+
|
437 |
+
task2_builder.add_edge("grade_landscape_gap", END)
|
438 |
+
|
439 |
+
# Compile
|
440 |
+
task2_graph = task2_builder.compile()
|
441 |
+
task2_graph.name = "Togaf_assess_asis_graph"
|
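A minimal sketch (not part of the commit) of driving the Task-2 landscape assessment graph: because the retrieve node is async, the compiled graph is run with ainvoke. The thread_id value and the input fields shown are illustrative assumptions.

import asyncio
from ea4all.src.ea4all_gra.togaf_task2.graph import task2_graph

async def main():
    result = await task2_graph.ainvoke(
        {"business_query": "What existing applications support online payments?", "next": "Task2"},
        config={"configurable": {"thread_id": "demo-1"}},
    )
    print(result["identified_asis"])  # applications judged relevant after grading

asyncio.run(main())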
ea4all/src/ea4all_gra/togaf_task2/state.py
ADDED
@@ -0,0 +1,49 @@
|
1 |
+
from typing_extensions import (
|
2 |
+
Annotated
|
3 |
+
)
|
4 |
+
import operator
|
5 |
+
from typing import (
|
6 |
+
Optional,
|
7 |
+
Annotated,
|
8 |
+
Sequence,
|
9 |
+
List
|
10 |
+
)
|
11 |
+
from dataclasses import dataclass
|
12 |
+
|
13 |
+
from langchain_core.messages import (
|
14 |
+
BaseMessage
|
15 |
+
)
|
16 |
+
|
17 |
+
from ea4all.src.ea4all_gra.data import (
|
18 |
+
BusinessCapability,
|
19 |
+
CapabilityGap,
|
20 |
+
LandscapeAsIs,
|
21 |
+
)
|
22 |
+
|
23 |
+
from ea4all.src.ea4all_gra.state import InputState
|
24 |
+
|
25 |
+
# Task-2 Graph State
|
26 |
+
@dataclass(kw_only=True)
|
27 |
+
class Task2State(InputState):
|
28 |
+
"""
|
29 |
+
Represents the landscape assessment state of our graph.
|
30 |
+
|
31 |
+
Attributes:
|
32 |
+
message: a message is added after each team member finishes
|
33 |
+
team_members: the team members are tracked so they are aware of the others' skill-sets
|
34 |
+
next: used to route work. The supervisor calls a function that will update this every time it makes a decision
|
35 |
+
business_query: the business query assessed against the as-is landscape
|
36 |
+
landscape_asis: list of applications
|
37 |
+
identified_asis: LLM generation
|
38 |
+
capability: list of business capabilities required to support intent and requirements
|
39 |
+
landscape_gap: business capability support gap
|
40 |
+
"""
|
41 |
+
|
42 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
43 |
+
team_members: Optional[List[str]] = None
|
44 |
+
business_query: Optional[str] = None
|
45 |
+
landscape_asis: Optional[List[str]] = None
|
46 |
+
identified_asis: Optional[LandscapeAsIs] = None
|
47 |
+
biz_capability: Optional[BusinessCapability] = None
|
48 |
+
landscape_gap: Optional[CapabilityGap] = None
|
49 |
+
next: Optional[str] = None
|
ea4all/src/ea4all_gra/togaf_task3/graph.py
ADDED
@@ -0,0 +1,280 @@
|
1 |
+
from markdownify import markdownify as md
|
2 |
+
from graphviz import Source
|
3 |
+
|
4 |
+
#core libraries
|
5 |
+
from langchain_core.runnables import RunnableConfig, RunnableGenerator
|
6 |
+
from langchain_core.messages import (
|
7 |
+
AIMessage,
|
8 |
+
)
|
9 |
+
|
10 |
+
from langchain import hub
|
11 |
+
|
12 |
+
from langgraph.graph import (
|
13 |
+
END,
|
14 |
+
StateGraph,
|
15 |
+
)
|
16 |
+
|
17 |
+
from ea4all.src.ea4all_gra.configuration import AgentConfiguration
|
18 |
+
|
19 |
+
from ea4all.src.shared.utils import (
|
20 |
+
get_llm_client,
|
21 |
+
set_max_new_tokens,
|
22 |
+
get_predicted_num_tokens_from_prompt,
|
23 |
+
extract_detailed_business_requirements,
|
24 |
+
load_mock_content,
|
25 |
+
extract_principles,
|
26 |
+
)
|
27 |
+
|
28 |
+
from ea4all.src.ea4all_gra.data import (
|
29 |
+
Principles,
|
30 |
+
)
|
31 |
+
|
32 |
+
from ea4all.src.ea4all_gra.togaf_task3.state import Task3State
|
33 |
+
|
34 |
+
# Task-3: Generate reference architecture Vision and Target first iteration
|
35 |
+
def generate_principles(state: Task3State, config: RunnableConfig):
|
36 |
+
|
37 |
+
#Extract Business, Technology and Architecture Principles
|
38 |
+
strategic_principles = load_mock_content('strategic_principles.txt') ##REFACTORING NEEDED
|
39 |
+
values = {"strategic_principles": md(strategic_principles)}
|
40 |
+
|
41 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
42 |
+
model = get_llm_client(
|
43 |
+
configuration.togaf_model,
|
44 |
+
api_base_url=configuration.api_base_url,
|
45 |
+
)
|
46 |
+
|
47 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,extract_principles(Principles),values))
|
48 |
+
|
49 |
+
##RE-use business reqs extractor
|
50 |
+
identified_principles = extract_detailed_business_requirements(model, Principles, "architecture principles", md(strategic_principles))
|
51 |
+
|
52 |
+
name = getattr(state, 'next')
|
53 |
+
return {
|
54 |
+
"messages": [AIMessage(content=str(identified_principles), name=name)],
|
55 |
+
"principles": identified_principles,
|
56 |
+
"business_query": getattr(state, 'business_query'),
|
57 |
+
"business_goal": getattr(state, 'intent'),
|
58 |
+
"business_stakeholder": getattr(state, 'stakeholder'),
|
59 |
+
"biz_capability": getattr(state, 'biz_capability'),
|
60 |
+
"requirement": getattr(state, 'requirement'),
|
61 |
+
"user_journey": getattr(state, 'userjourney'),
|
62 |
+
"landscape_asis": getattr(state, 'landscape_asis'),
|
63 |
+
"identified_asis": getattr(state, 'identified_asis'),
|
64 |
+
"landscape_gap": getattr(state, 'landscape_gap')
|
65 |
+
}
|
66 |
+
|
67 |
+
async def stream_vision_target(state: Task3State, config: RunnableConfig):
|
68 |
+
##MAX_TOKENS OPTIMISATION 2024-07-08
|
69 |
+
|
70 |
+
async for s in state:
|
71 |
+
intent = getattr(s, 'intent')
|
72 |
+
stakeholder = getattr(s, 'stakeholder')
|
73 |
+
biz_capability = getattr(s, 'biz_capability')
|
74 |
+
requirement = getattr(s, 'requirement')
|
75 |
+
userjourney = getattr(s, 'userjourney')
|
76 |
+
identified_principles = getattr(s, 'principles')
|
77 |
+
landscape_asis = getattr(s, 'landscape_asis')
|
78 |
+
identified_asis = getattr(s, 'identified_asis')
|
79 |
+
landscape_gap = getattr(s, 'landscape_gap')
|
80 |
+
|
81 |
+
# Prompt
|
82 |
+
vision_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_vision_target')
|
83 |
+
|
84 |
+
values = {
|
85 |
+
"business_goal": intent,
|
86 |
+
"business_stakeholder": stakeholder,
|
87 |
+
"business_capability": biz_capability,
|
88 |
+
"principles": identified_principles,
|
89 |
+
"requirement": requirement,
|
90 |
+
"user_journey": userjourney,
|
91 |
+
"landscape_asis": landscape_asis,
|
92 |
+
"identified_asis": identified_asis,
|
93 |
+
"landscape_gap": landscape_gap
|
94 |
+
}
|
95 |
+
|
96 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
97 |
+
model = get_llm_client(
|
98 |
+
configuration.togaf_model,
|
99 |
+
api_base_url=configuration.api_base_url,
|
100 |
+
streaming=configuration.streaming,
|
101 |
+
)
|
102 |
+
|
103 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,vision_prompt,values))
|
104 |
+
vision_chain = vision_prompt | model
|
105 |
+
|
106 |
+
async for output in vision_chain.astream(values):
|
107 |
+
yield(output)
|
108 |
+
|
109 |
+
async def generate_vision(state: Task3State):
|
110 |
+
"""
|
111 |
+
Generate answer
|
112 |
+
|
113 |
+
Args:
|
114 |
+
state (dict): The current graph state
|
115 |
+
|
116 |
+
Returns:
|
117 |
+
state (dict): New key added to state, generation, that contains LLM generation
|
118 |
+
"""
|
119 |
+
|
120 |
+
gen = RunnableGenerator(stream_vision_target).with_config(tags=["gra_stream"])
|
121 |
+
|
122 |
+
generation=""
|
123 |
+
async for message in gen.astream(state):
|
124 |
+
generation = ''.join([generation,message.content])
|
125 |
+
|
126 |
+
name = getattr(state, 'next')
|
127 |
+
|
128 |
+
return {
|
129 |
+
"messages": [AIMessage(content=generation, name=name)],
|
130 |
+
"principles": getattr(state, 'principles'),
|
131 |
+
"business_query": getattr(state, 'business_query'),
|
132 |
+
"intent": getattr(state, 'intent'),
|
133 |
+
"stakeholder": getattr(state, 'stakeholder'),
|
134 |
+
"biz_capability": getattr(state, 'biz_capability'),
|
135 |
+
"requirement": getattr(state, 'requirement'),
|
136 |
+
"userjourney": getattr(state, 'userjourney'),
|
137 |
+
"landscape_asis": getattr(state, 'landscape_asis'),
|
138 |
+
"identified_asis": getattr(state, 'identified_asis'),
|
139 |
+
"landscape_gap": getattr(state, 'landscape_gap'),
|
140 |
+
"vision_target": generation
|
141 |
+
}
|
142 |
+
|
143 |
+
def generate_architecture_runway(state: Task3State, config: RunnableConfig):
|
144 |
+
stakeholder = getattr(state, 'stakeholder')
|
145 |
+
biz_capability = getattr(state, 'biz_capability')
|
146 |
+
userjourney = getattr(state, 'userjourney')
|
147 |
+
identified_asis = getattr(state, 'identified_asis')
|
148 |
+
intent = getattr(state, 'intent')
|
149 |
+
|
150 |
+
# Prompt
|
151 |
+
runway_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_architecture_runway')
|
152 |
+
|
153 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
154 |
+
model = get_llm_client(
|
155 |
+
configuration.togaf_model,
|
156 |
+
api_base_url=configuration.api_base_url,
|
157 |
+
)
|
158 |
+
|
159 |
+
values = {
|
160 |
+
"stakeholder": stakeholder,
|
161 |
+
"business_capability": biz_capability,
|
162 |
+
"userjourney": userjourney,
|
163 |
+
"identified_asis": identified_asis,
|
164 |
+
"intent": intent,
|
165 |
+
}
|
166 |
+
|
167 |
+
##Issue w/ llama-3.2-vision and ChatOpenAI token limits
|
168 |
+
model.max_tokens = int((configuration.max_tokens - get_predicted_num_tokens_from_prompt(model,runway_prompt,values))*0.95)
|
169 |
+
|
170 |
+
vision_chain = runway_prompt | model
|
171 |
+
architecture_runway = vision_chain.invoke(input=values)
|
172 |
+
|
173 |
+
name = getattr(state, 'next')
|
174 |
+
|
175 |
+
return {
|
176 |
+
"messages": [AIMessage(content=architecture_runway.content, name=name)],
|
177 |
+
"architecture_runway": architecture_runway.content
|
178 |
+
}
|
179 |
+
|
180 |
+
def save_diagram(state: Task3State, config: RunnableConfig, fmt=["svg","png"]):
|
181 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
182 |
+
|
183 |
+
input_img = state.architecture_runway
|
184 |
+
output_img = "togaf_runway_" + str(config['configurable']['thread_id'])
|
185 |
+
|
186 |
+
try:
|
187 |
+
x=Source(
|
188 |
+
source=input_img,
|
189 |
+
filename=output_img,
|
190 |
+
format=fmt[0]
|
191 |
+
)
|
192 |
+
response = x.render(
|
193 |
+
cleanup=True,
|
194 |
+
directory=configuration.ea4all_images,
|
195 |
+
format=fmt[1],
|
196 |
+
view=False,
|
197 |
+
).replace('\\', '/')
|
198 |
+
except Exception as e:
|
199 |
+
response=f"Error: Agent couldn't parse the diagram at this time! {e} \n {input_img}"
|
200 |
+
|
201 |
+
return{"architecture_runway": response}
|
202 |
+
|
203 |
+
def generate_reference_architecture(state: Task3State):
|
204 |
+
##BY PASS to generate principles
|
205 |
+
return {
|
206 |
+
"business_query": getattr(state, 'business_query'),
|
207 |
+
"intent": getattr(state, 'intent'),
|
208 |
+
"stakeholder": getattr(state, 'stakeholder'),
|
209 |
+
"biz_capability": getattr(state, 'biz_capability'),
|
210 |
+
"requirement": getattr(state, 'requirement'),
|
211 |
+
"userjourney": getattr(state, 'userjourney'),
|
212 |
+
"landscape_asis": getattr(state, 'landscape_asis'),
|
213 |
+
"identified_asis": getattr(state, 'identified_asis'),
|
214 |
+
"landscape_gap": getattr(state, 'landscape_gap'),
|
215 |
+
}
|
216 |
+
|
217 |
+
async def stream_vision_target(state: Task3State, config: RunnableConfig):
|
218 |
+
##MAX_TOKENS OPTIMISATION 2024-07-08
|
219 |
+
|
220 |
+
async for s in state:
|
221 |
+
intent = getattr(s, 'intent')
|
222 |
+
stakeholder = getattr(s, 'stakeholder')
|
223 |
+
biz_capability = getattr(s, 'biz_capability')
|
224 |
+
requirement = getattr(s, 'requirement')
|
225 |
+
userjourney = getattr(s, 'userjourney')
|
226 |
+
identified_principles = getattr(s, 'principles')
|
227 |
+
landscape_asis = getattr(s, 'landscape_asis')
|
228 |
+
identified_asis = getattr(s, 'identified_asis')
|
229 |
+
landscape_gap = getattr(s, 'landscape_gap')
|
230 |
+
|
231 |
+
# Prompt
|
232 |
+
vision_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_vision_target')
|
233 |
+
|
234 |
+
values = {
|
235 |
+
"business_goal": intent,
|
236 |
+
"business_stakeholder": stakeholder,
|
237 |
+
"business_capability": biz_capability,
|
238 |
+
"principles": identified_principles,
|
239 |
+
"requirement": requirement,
|
240 |
+
"user_journey": userjourney,
|
241 |
+
"landscape_asis": landscape_asis,
|
242 |
+
"identified_asis": identified_asis,
|
243 |
+
"landscape_gap": landscape_gap
|
244 |
+
}
|
245 |
+
|
246 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
247 |
+
model = get_llm_client(
|
248 |
+
configuration.togaf_model,
|
249 |
+
api_base_url=configuration.api_base_url,
|
250 |
+
)
|
251 |
+
|
252 |
+
model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,vision_prompt,values))
|
253 |
+
vision_chain = vision_prompt | model
|
254 |
+
|
255 |
+
for output in vision_chain.stream(values):
|
256 |
+
yield(output)
|
257 |
+
|
258 |
+
##TASK-3 Graph
|
259 |
+
workflow = StateGraph(Task3State)
|
260 |
+
|
261 |
+
# Define the nodes
|
262 |
+
workflow.add_node("generate_reference_architecture", generate_reference_architecture) # business, technology, architecture principles
|
263 |
+
workflow.add_node("generate_principles", generate_principles) # business, technology, architecture principles
|
264 |
+
workflow.add_node("generate_vision_target", generate_vision) # architecture vision and target
|
265 |
+
workflow.add_node("generate_architecture_runway", generate_architecture_runway) # draw high-level diagram target state
|
266 |
+
workflow.add_node("save_diagram", save_diagram)
|
267 |
+
|
268 |
+
# Build graph
|
269 |
+
workflow.add_edge("generate_reference_architecture", "generate_principles")
|
270 |
+
workflow.add_edge("generate_principles", "generate_vision_target")
|
271 |
+
workflow.add_edge("generate_vision_target", "generate_architecture_runway")
|
272 |
+
workflow.add_edge("generate_architecture_runway","save_diagram")
|
273 |
+
workflow.add_edge("save_diagram", END)
|
274 |
+
|
275 |
+
#Entry point
|
276 |
+
workflow.set_entry_point("generate_reference_architecture")
|
277 |
+
|
278 |
+
# Compile
|
279 |
+
task3_graph = workflow.compile()
|
280 |
+
task3_graph.name = "Togaf_generate_tobe_graph"
|
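A minimal sketch (not part of the commit) of running the Task-3 graph end to end: generate_vision consumes an async stream internally, so ainvoke is used, and save_diagram reads thread_id from the config. All input values below are illustrative.

import asyncio
from ea4all.src.ea4all_gra.togaf_task3.graph import task3_graph

async def main():
    result = await task3_graph.ainvoke(
        {"business_query": "Enable online appointment booking", "intent": "Reduce call-centre load", "next": "Task3"},
        config={"configurable": {"thread_id": "demo-1"}},
    )
    print(result["vision_target"][:300])   # opening of the generated vision/target narrative
    print(result["architecture_runway"])   # rendered diagram path, or the error text from save_diagram

asyncio.run(main())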
ea4all/src/ea4all_gra/togaf_task3/state.py
ADDED
@@ -0,0 +1,66 @@
|
1 |
+
from typing_extensions import (
|
2 |
+
Annotated
|
3 |
+
)
|
4 |
+
import operator
|
5 |
+
from typing import (
|
6 |
+
Optional,
|
7 |
+
Annotated,
|
8 |
+
Sequence,
|
9 |
+
List
|
10 |
+
)
|
11 |
+
from dataclasses import dataclass
|
12 |
+
|
13 |
+
from langchain_core.messages import (
|
14 |
+
BaseMessage
|
15 |
+
)
|
16 |
+
|
17 |
+
from ea4all.src.ea4all_gra.data import (
|
18 |
+
BusinessCapability,
|
19 |
+
CapabilityGap,
|
20 |
+
LandscapeAsIs,
|
21 |
+
StakeholderList,
|
22 |
+
Principles,
|
23 |
+
ListObjective,
|
24 |
+
ListRequirement,
|
25 |
+
UserJourney,
|
26 |
+
)
|
27 |
+
|
28 |
+
from ea4all.src.ea4all_gra.state import InputState
|
29 |
+
|
30 |
+
# Task-3 Graph State
|
31 |
+
@dataclass(kw_only=True)
|
32 |
+
class Task3State(InputState):
|
33 |
+
"""
|
34 |
+
Represents the Reference Architecture state of our graph.
|
35 |
+
|
36 |
+
Attributes:
|
37 |
+
message: a message is added after each team member finishes
|
38 |
+
team_members: the team members are tracked so they are aware of the others' skill-sets
|
39 |
+
next: used to route work. The supervisor calls a function that will update this every time it makes a decision
|
40 |
+
business_query: business demand to be delivered
|
41 |
+
principles: list of principles to the architecture work
|
42 |
+
requirement: list of business requirements
|
43 |
+
intent: business objective, goal
|
44 |
+
user_journey: list of user journeys
|
45 |
+
stakeholder: list of stakeholder and their concerns
|
46 |
+
capability: list of business capabilities to deliver intent and requirements
|
47 |
+
landscape_asis: list of potential applications to support business query
|
48 |
+
identified_asis: identified target applications
|
49 |
+
landscape_gap: list of capabilities not supported by as-is landscape
|
50 |
+
"""
|
51 |
+
|
52 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
53 |
+
team_members: Optional[List[str]] = None
|
54 |
+
business_query: Optional[str] = None
|
55 |
+
landscape_asis: Optional[List[str]] = None
|
56 |
+
identified_asis: Optional[LandscapeAsIs] = None
|
57 |
+
biz_capability: Optional[BusinessCapability] = None
|
58 |
+
landscape_gap: Optional[CapabilityGap] = None
|
59 |
+
stakeholder: Optional[StakeholderList] = None
|
60 |
+
principles: Optional[Principles] = None
|
61 |
+
requirement: Optional[ListRequirement] = None
|
62 |
+
intent: Optional[ListObjective] = None
|
63 |
+
userjourney: Optional[UserJourney] = None
|
64 |
+
vision_target: Optional[str] = None
|
65 |
+
architecture_runway: Optional[str] = None
|
66 |
+
next: Optional[str] = None
|
ea4all/src/ea4all_gra/utils.py
ADDED
@@ -0,0 +1,125 @@
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
import asyncio
|
4 |
+
import threading
|
5 |
+
|
6 |
+
def assign_event_loop_to_thread():
|
7 |
+
"""
|
8 |
+
Explicitly assign a new event loop to the current thread
|
9 |
+
This method can be called at the start of thread-based operations
|
10 |
+
"""
|
11 |
+
try:
|
12 |
+
# Try to get the current event loop
|
13 |
+
loop = asyncio.get_event_loop()
|
14 |
+
except RuntimeError:
|
15 |
+
# If no event loop exists, create a new one
|
16 |
+
loop = asyncio.new_event_loop()
|
17 |
+
|
18 |
+
# Set the new event loop for the current thread
|
19 |
+
asyncio.set_event_loop(loop)
|
20 |
+
|
21 |
+
return loop
|
22 |
+
|
23 |
+
class AsyncInterruptHandler:
|
24 |
+
def __init__(self):
|
25 |
+
# Assign event loop at initialization
|
26 |
+
self.loop = assign_event_loop_to_thread()
|
27 |
+
self.input_queue = asyncio.Queue()
|
28 |
+
self.event = asyncio.Event()
|
29 |
+
|
30 |
+
self.user_feedback = None
|
31 |
+
self.interface = None
|
32 |
+
self.interface_thread = None
|
33 |
+
|
34 |
+
# Get or create the event loop
|
35 |
+
try:
|
36 |
+
self.loop = asyncio.get_event_loop()
|
37 |
+
except RuntimeError:
|
38 |
+
self.loop = asyncio.new_event_loop()
|
39 |
+
asyncio.set_event_loop(self.loop)
|
40 |
+
|
41 |
+
async def close_interface_with_timeout(self):
|
42 |
+
# Get the current thread's event loop
|
43 |
+
try:
|
44 |
+
print(f"NOT Closing interface: {self.interface.is_callable()}")
|
45 |
+
|
46 |
+
except asyncio.TimeoutError:
|
47 |
+
print("Interface closure timed out")
|
48 |
+
except Exception as e:
|
49 |
+
print(f"Error closing interface: {e}")
|
50 |
+
|
51 |
+
def launch_gradio_interface(self):
|
52 |
+
def run_interface():
|
53 |
+
# Explicitly assign event loop for this thread
|
54 |
+
assign_event_loop_to_thread()
|
55 |
+
|
56 |
+
# Get the current thread's event loop
|
57 |
+
current_loop = asyncio.get_event_loop()
|
58 |
+
|
59 |
+
try:
|
60 |
+
# Run the interface creation coroutine
|
61 |
+
current_loop.run_until_complete(self._create_and_launch_interface())
|
62 |
+
except Exception as e:
|
63 |
+
print(f"Error in run_interface: {e}")
|
64 |
+
|
65 |
+
# Create and start the thread
|
66 |
+
self.interface_thread = threading.Thread(target=run_interface, daemon=True)
|
67 |
+
self.interface_thread.start()
|
68 |
+
|
69 |
+
async def _create_and_launch_interface(self):
|
70 |
+
title = 'User Input'
|
71 |
+
description = 'Please provide input'
|
72 |
+
|
73 |
+
async def submit_input(user_feedback):
|
74 |
+
asyncio.run_coroutine_threadsafe(self.input_queue.put(user_feedback), self.loop)
|
75 |
+
self.event.set()
|
76 |
+
|
77 |
+
print(f"User Provided input: {user_feedback}")
|
78 |
+
|
79 |
+
return user_feedback
|
80 |
+
|
81 |
+
with gr.Blocks() as demo:
|
82 |
+
gr.Markdown(f"###{title}")
|
83 |
+
gr.Markdown(f"**{description}")
|
84 |
+
|
85 |
+
input_component = gr.Textbox(label="Your input")
|
86 |
+
submit_btn = gr.Button("Submit")
|
87 |
+
output = gr.Textbox(label="Status")
|
88 |
+
|
89 |
+
submit_btn.click(
|
90 |
+
submit_input,
|
91 |
+
inputs=input_component,
|
92 |
+
outputs=output
|
93 |
+
)
|
94 |
+
|
95 |
+
self.event = asyncio.Event()
|
96 |
+
self.event.clear()
|
97 |
+
self.user_feedback = None
|
98 |
+
|
99 |
+
self.interface = demo
|
100 |
+
self.interface.launch(inbrowser=True)
|
101 |
+
|
102 |
+
async def handle_interrupt(self):
|
103 |
+
self.launch_gradio_interface()
|
104 |
+
|
105 |
+
try:
|
106 |
+
# Use the current loop's queue and event
|
107 |
+
self.user_feedback = await asyncio.wait_for(
|
108 |
+
self.input_queue.get(),
|
109 |
+
timeout=300.0, # 5-minute timeout
|
110 |
+
)
|
111 |
+
|
112 |
+
print(f"Finished waiting for user input {self.user_feedback}")
|
113 |
+
|
114 |
+
return self.user_feedback
|
115 |
+
|
116 |
+
except asyncio.TimeoutError:
|
117 |
+
print("Gradio interface timed out")
|
118 |
+
return None
|
119 |
+
|
120 |
+
except Exception as e:
|
121 |
+
print(f"Error in handle_interrupt: {e}")
|
122 |
+
return None
|
123 |
+
|
124 |
+
finally:
|
125 |
+
await self.close_interface_with_timeout()
|
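A minimal sketch (not part of the commit) of collecting human feedback with AsyncInterruptHandler: handle_interrupt launches the Gradio pop-up and waits up to five minutes for input. The calling pattern below is an assumption about how a graph node might use it.

import asyncio
from ea4all.src.ea4all_gra.utils import AsyncInterruptHandler

async def ask_user():
    handler = AsyncInterruptHandler()
    feedback = await handler.handle_interrupt()  # None on timeout or error
    return feedback or "no feedback provided"

print(asyncio.run(ask_user()))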
ea4all/src/ea4all_indexer/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
1 |
+
"""Index Graph Module."""
|
2 |
+
|
3 |
+
#from ea4all_indexer.graph import ea4all_indexer
|
4 |
+
|
5 |
+
#__all__ = ["ea4all_indexer"]
|
ea4all/src/ea4all_indexer/configuration.py
ADDED
@@ -0,0 +1,22 @@
|
1 |
+
"""Define the configurable parameters for the index graph."""
|
2 |
+
|
3 |
+
from dataclasses import dataclass, field
|
4 |
+
|
5 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
6 |
+
|
7 |
+
# This file contains sample APPLICATIONS to index
|
8 |
+
DEFAULT_APM_CATALOGUE = "APM-ea4all (test-split).xlsx"
|
9 |
+
|
10 |
+
@dataclass(kw_only=True)
|
11 |
+
class IndexConfiguration(BaseConfiguration):
|
12 |
+
"""Configuration class for indexing and retrieval operations.
|
13 |
+
|
14 |
+
This class defines the parameters needed for configuring the indexing and
|
15 |
+
retrieval processes, including embedding model selection, retriever provider choice, and search parameters.
|
16 |
+
"""
|
17 |
+
apm_catalogue: str = field(
|
18 |
+
default=DEFAULT_APM_CATALOGUE,
|
19 |
+
metadata={
|
20 |
+
"description": "The EA4ALL APM default Vectorstore index name."
|
21 |
+
},
|
22 |
+
)
|
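A small sketch (not part of the commit) of overriding the default catalogue name: from_runnable_config is assumed to behave as in the other configuration classes and read fields from config["configurable"]. The file name shown is hypothetical.

from ea4all.src.ea4all_indexer.configuration import IndexConfiguration

config = {"configurable": {"apm_catalogue": "my-landscape.xlsx"}}  # hypothetical catalogue file
index_cfg = IndexConfiguration.from_runnable_config(config)
print(index_cfg.apm_catalogue)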
ea4all/src/ea4all_indexer/graph.py
ADDED
@@ -0,0 +1,57 @@
|
1 |
+
"""This "graph" simply exposes an endpoint for a user to upload docs to be indexed."""
|
2 |
+
"""Changelog: 2025-06-03
|
3 |
+
- Refactored code to fix problems with linter and type checking (Standard mode)
|
4 |
+
- Refactored to use langgraph state management for MCP compatibility.
|
5 |
+
- Enabled input BYOD (Bring Your Own Data) for indexing.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from typing import Optional
|
9 |
+
|
10 |
+
from langchain_core.runnables import RunnableConfig
|
11 |
+
from langgraph.graph import END, START, StateGraph
|
12 |
+
|
13 |
+
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
|
14 |
+
from ea4all.src.ea4all_indexer.state import InputState, OutputState, OverallState
|
15 |
+
from ea4all.src.shared import vectorstore
|
16 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
17 |
+
|
18 |
+
async def index_docs(
|
19 |
+
state: InputState, *, config: RunnableConfig
|
20 |
+
) -> dict[str, str]:
|
21 |
+
"""Asynchronously index documents in the given state using the configured retriever.
|
22 |
+
|
23 |
+
This function takes the documents from the state, ensures they have a user ID,
|
24 |
+
adds them to the retriever's index, and then signals for the documents to be
|
25 |
+
deleted from the state.
|
26 |
+
|
27 |
+
If docs are not provided in the state, they will be loaded
|
28 |
+
from the configuration.docs_file JSON file.
|
29 |
+
|
30 |
+
Args:
|
31 |
+
state (IndexState): The current state containing documents and retriever.
|
32 |
+
config (Optional[RunnableConfig]): Configuration for the indexing process.
|
33 |
+
"""
|
34 |
+
if not config:
|
35 |
+
raise ValueError("Configuration required to run index_docs.")
|
36 |
+
|
37 |
+
#configuration = IndexConfiguration.from_runnable_config(config)
|
38 |
+
|
39 |
+
with vectorstore.make_retriever(config) as retriever:
|
40 |
+
if len(retriever.vectorstore.docstore._dict) == 0:
|
41 |
+
apm_docs = vectorstore.get_apm_excel_content(config, file=state.path)
|
42 |
+
await retriever.aadd_documents(apm_docs)
|
43 |
+
retriever.vectorstore.save_local(
|
44 |
+
folder_path=getattr(config, "ea4all_store", BaseConfiguration.ea4all_store),
|
45 |
+
index_name=getattr(config, "apm_faiss", BaseConfiguration.apm_faiss)
|
46 |
+
)
|
47 |
+
|
48 |
+
return {"docs": "delete"}
|
49 |
+
|
50 |
+
# Define the graph
|
51 |
+
builder = StateGraph(OverallState, input=InputState, output=OutputState, config_schema=IndexConfiguration)
|
52 |
+
builder.add_node("apm_indexer",index_docs)
|
53 |
+
builder.add_edge(START, "apm_indexer")
|
54 |
+
|
55 |
+
# Compile into a graph object that you can invoke and deploy.
|
56 |
+
indexer_graph = builder.compile()
|
57 |
+
indexer_graph.name = "EA4ALL APM Indexer"
|
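A minimal sketch (not part of the commit) of triggering the indexer: index_docs is async and requires a non-empty config, so the graph is driven with ainvoke. The spreadsheet path is illustrative and may be omitted to fall back to the configured catalogue.

import asyncio
from ea4all.src.ea4all_indexer.graph import indexer_graph

result = asyncio.run(
    indexer_graph.ainvoke({"path": "APM-ea4all (test-split).xlsx"}, config={"configurable": {}})
)
print(result)  # expected to include {"docs": "delete"} once indexing completes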
ea4all/src/ea4all_indexer/state.py
ADDED
@@ -0,0 +1,44 @@
|
1 |
+
"""State management for the index graph."""
|
2 |
+
|
3 |
+
from dataclasses import dataclass
|
4 |
+
from typing import Annotated, Optional
|
5 |
+
|
6 |
+
from langchain_core.documents import Document
|
7 |
+
|
8 |
+
from ea4all.src.shared.state import reduce_docs
|
9 |
+
|
10 |
+
@dataclass(kw_only=True)
|
11 |
+
class InputState:
|
12 |
+
"""Represents the input state for the index graph.
|
13 |
+
|
14 |
+
This class is used to pass the input documents to the index graph.
|
15 |
+
It contains a single field, `path`, which is the source of documents.
|
16 |
+
"""
|
17 |
+
|
18 |
+
path: Optional[str] = None
|
19 |
+
"""Document source path to be indexed by the graph."""
|
20 |
+
|
21 |
+
|
22 |
+
# The index state defines the simple IO for the single-node index graph
|
23 |
+
@dataclass(kw_only=True)
|
24 |
+
class OutputState:
|
25 |
+
"""Represents the state for document indexing and retrieval.
|
26 |
+
|
27 |
+
This class defines the structure of the index state, which includes
|
28 |
+
the documents to be indexed and the retriever used for searching
|
29 |
+
these documents.
|
30 |
+
"""
|
31 |
+
|
32 |
+
docs: Annotated[list[Document], reduce_docs]
|
33 |
+
"""A list of documents that the agent can index."""
|
34 |
+
|
35 |
+
@dataclass(kw_only=True)
|
36 |
+
class OverallState(InputState):
|
37 |
+
"""Represents the overall state of the index graph.
|
38 |
+
|
39 |
+
This class combines the input and output states, allowing for
|
40 |
+
both input documents and indexed documents to be managed within
|
41 |
+
the same state.
|
42 |
+
"""
|
43 |
+
|
44 |
+
pass
|
ea4all/src/ea4all_vqa/configuration.py
ADDED
@@ -0,0 +1,42 @@
|
1 |
+
"""Define the configurable parameters for the VQA agent."""
|
2 |
+
|
3 |
+
#'from __future__ import annotations
|
4 |
+
|
5 |
+
from dataclasses import dataclass, field
|
6 |
+
|
7 |
+
#'from shared.configuration import BaseConfiguration
|
8 |
+
from typing import Annotated
|
9 |
+
|
10 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
11 |
+
|
12 |
+
@dataclass(kw_only=True)
|
13 |
+
class AgentConfiguration(BaseConfiguration):
|
14 |
+
"""The configuration for the agent."""
|
15 |
+
|
16 |
+
supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
17 |
+
default="gpt-4o-mini",
|
18 |
+
metadata={
|
19 |
+
"description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
|
20 |
+
},
|
21 |
+
)
|
22 |
+
|
23 |
+
vqa_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
24 |
+
default="gpt-4o-mini", #meta-llama/llama-3.2-11B-Vision-Instruct",
|
25 |
+
metadata={
|
26 |
+
"description": "The language model used for visual questions and answering. Should be in the form: provider/model-name."
|
27 |
+
},
|
28 |
+
)
|
29 |
+
|
30 |
+
vqa_max_tokens: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
|
31 |
+
default=4096,
|
32 |
+
metadata={
|
33 |
+
"description": "The maximum number of tokens allowed for the visual question and answer model."
|
34 |
+
},
|
35 |
+
)
|
36 |
+
|
37 |
+
ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
|
38 |
+
default="Frontend",
|
39 |
+
metadata={
|
40 |
+
"description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
|
41 |
+
},
|
42 |
+
)
|
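A small sketch (not part of the commit) of resolving the VQA agent configuration from a RunnableConfig, mirroring how vqa_diagram obtains its model; the override values are illustrative.

from ea4all.src.ea4all_vqa.configuration import AgentConfiguration

config = {"configurable": {"vqa_model": "gpt-4o-mini", "vqa_max_tokens": 2048}}
vqa_cfg = AgentConfiguration.from_runnable_config(config)
print(vqa_cfg.vqa_model, vqa_cfg.vqa_max_tokens)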
ea4all/src/ea4all_vqa/graph.py
ADDED
@@ -0,0 +1,405 @@
|
1 |
+
"""This graph implements a Vision Question Answering (VQA) agent for architecture diagrams and flowcharts."""
|
2 |
+
"""Changelog:
|
3 |
+
- Build the VQA Graph
|
4 |
+
- Setup state shared between nodes
|
5 |
+
- DiagramSupervisor function disabled, direct call to vqa_diagram
|
6 |
+
- Retrofitted supervisor function and added build_vqa_graph
|
7 |
+
#2025-06-03
|
8 |
+
- Refactored code to fix problems with linter and type checking (Standard mode)
|
9 |
+
- Refactored to use langgraph state management for MCP compatibility.
|
10 |
+
- Enabled input BYOD (Bring Your Own Data) for indexing.
|
11 |
+
"""
|
12 |
+
#core libraries
|
13 |
+
from langchain_core.runnables import RunnableConfig
|
14 |
+
from langchain_core.prompts.chat import ChatPromptTemplate
|
15 |
+
from langchain_core.prompts import ChatPromptTemplate
|
16 |
+
from langchain_core.runnables.base import RunnableLambda, RunnableSerializable
|
17 |
+
from langchain_core.runnables import RunnableConfig
|
18 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
19 |
+
|
20 |
+
from langchain_core.messages import (
|
21 |
+
AIMessage,
|
22 |
+
HumanMessage,
|
23 |
+
ToolMessage
|
24 |
+
)
|
25 |
+
|
26 |
+
#pydantic
|
27 |
+
from pydantic import BaseModel, Field
|
28 |
+
|
29 |
+
from json import JSONDecodeError
|
30 |
+
|
31 |
+
from typing import (
|
32 |
+
Annotated,
|
33 |
+
)
|
34 |
+
from typing_extensions import Literal, TypedDict
|
35 |
+
|
36 |
+
#Graphs, Agents
|
37 |
+
from langchain.agents import tool
|
38 |
+
from langchain_core.agents import AgentFinish
|
39 |
+
from langgraph.graph import (
|
40 |
+
START,
|
41 |
+
END,
|
42 |
+
StateGraph,
|
43 |
+
)
|
44 |
+
from langgraph.prebuilt import ToolNode, tools_condition, InjectedState
|
45 |
+
from langgraph.types import Command
|
46 |
+
from langgraph.checkpoint.memory import MemorySaver
|
47 |
+
|
48 |
+
#import APMGraph packages
|
49 |
+
from ea4all.src.ea4all_vqa.configuration import AgentConfiguration
|
50 |
+
from ea4all.src.ea4all_vqa.state import InputState, OutputState, OverallState
|
51 |
+
|
52 |
+
#import shared packages
|
53 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
54 |
+
from ea4all.src.shared.state import State
|
55 |
+
from ea4all.src.shared.utils import (
|
56 |
+
get_llm_client,
|
57 |
+
_get_formatted_date,
|
58 |
+
get_raw_image,
|
59 |
+
extract_topic_from_business_input,
|
60 |
+
set_max_new_tokens,
|
61 |
+
get_predicted_num_tokens_from_prompt,
|
62 |
+
_join_paths,
|
63 |
+
)
|
64 |
+
|
65 |
+
import spaces
|
66 |
+
|
67 |
+
##Diagram Graph Tools
|
68 |
+
#Data model Safeguarding
|
69 |
+
@tool("diagram_safeguard")
|
70 |
+
class DiagramV2S(BaseModel):
|
71 |
+
"""Check whether the image provided is an architecture diagram or flowchart and safe to be processed."""
|
72 |
+
isArchitectureImage: bool = Field(...,description="Should be True if an image is an architecture diagram or flowchart, otherwise False.")
|
73 |
+
isSafe: bool = Field(...,description="Should be True if image or question are safe to be processed, False otherwise")
|
74 |
+
description: str = Field(description="Should be a string describing the image title.")
|
75 |
+
|
76 |
+
@tool("vqa_diagram")
|
77 |
+
@spaces.GPU
|
78 |
+
async def vqa_diagram(next:str, state: Annotated[OverallState, InjectedState], config: RunnableConfig):
|
79 |
+
"""Diagram Vision Question Answering"""
|
80 |
+
|
81 |
+
print(f"---AGENT VQA PROCESSING QUESTION & ANSWERING---")
|
82 |
+
|
83 |
+
# Get any user-provided configs - LLM model in use
|
84 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
85 |
+
llm = get_llm_client(
|
86 |
+
model=configuration.vqa_model,
|
87 |
+
api_base_url=configuration.api_base_url,
|
88 |
+
streaming=configuration.streaming,
|
89 |
+
)
|
90 |
+
|
91 |
+
question = getattr(state, "question")
|
92 |
+
raw_image = get_raw_image(getattr(state,'image'))
|
93 |
+
|
94 |
+
user_message = HumanMessage(
|
95 |
+
content=[
|
96 |
+
{"type": "text", "text": f"{question}"},
|
97 |
+
{
|
98 |
+
"type": "image_url",
|
99 |
+
"image_url": {"url": f"data:image/png;base64,{raw_image}"},
|
100 |
+
},
|
101 |
+
],
|
102 |
+
)
|
103 |
+
|
104 |
+
prompt = ChatPromptTemplate.from_messages([user_message])
|
105 |
+
values = {"question:":question}
|
106 |
+
|
107 |
+
llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, values))
|
108 |
+
chain = prompt | llm
|
109 |
+
|
110 |
+
# Not streaming the response to MCP Inspector
|
111 |
+
#async for message in chain.astream(input=values, config={"tags": ["vqa_stream"]}, kwargs={"max_tokens": configuration.vqa_max_tokens}):
|
112 |
+
# yield message
|
113 |
+
|
114 |
+
response = await chain.ainvoke(input=values, config={"tags": ["vqa_stream"]}, kwargs={"max_tokens": configuration.vqa_max_tokens})
|
115 |
+
|
116 |
+
return response
|
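For reference, a self-contained sketch of the multimodal message shape built above, assuming a local PNG file; the file name and question are placeholders.

import base64
from langchain_core.messages import HumanMessage

# Encode a local diagram as base64, the same format get_raw_image is expected to return.
with open("multi-app-architecture.png", "rb") as f:   # hypothetical local file
    raw_image = base64.b64encode(f.read()).decode("utf-8")

message = HumanMessage(content=[
    {"type": "text", "text": "Describe this architecture diagram."},
    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{raw_image}"}},
])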
117 |
+
|
118 |
+
##Supervisor Agent Function custom parse with tool calling response support
|
119 |
+
def parse(output: ToolMessage) -> dict | AgentFinish:
|
120 |
+
|
121 |
+
# Parse out the function call
|
122 |
+
print("---PARSING SUPERVISOR AGENT OUTPUT---")
|
123 |
+
print(output.content)
|
124 |
+
try:
|
125 |
+
response = extract_topic_from_business_input(output.content)
|
126 |
+
_next = response['parameters']['next']
|
127 |
+
except JSONDecodeError:
|
128 |
+
return AgentFinish(return_values={"output": output.content}, log=str(output.content))
|
129 |
+
|
130 |
+
# If no function was selected, return to user
|
131 |
+
if _next == "FINISH":
|
132 |
+
return AgentFinish(return_values={"output": output.content}, log=str(output.content))
|
133 |
+
|
134 |
+
# If the DiagramTagging function was selected, return to the user with the function inputs
|
135 |
+
tool_call = {"name": "vqa_diagram", "args": {"next": _next}, "id": "1", "type": "tool_call"}
|
136 |
+
|
137 |
+
print(f"---ROUTING QUESTIONS TO {_next}---")
|
138 |
+
print(output.content)
|
139 |
+
|
140 |
+
return {
|
141 |
+
"messages": [AIMessage("", tool_calls=[tool_call])],
|
142 |
+
"next": _next,
|
143 |
+
}
|
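To make the routing contract explicit: parse() assumes the helper extract_topic_from_business_input (defined elsewhere) yields a dict shaped like the route function call below; this is an illustrative sketch, not the helper's implementation.

import json

# Example of a well-formed supervisor reply and the tool call derived from it.
supervisor_output = '{"name": "route", "parameters": {"next": "diagram_risk"}}'
parsed = json.loads(supervisor_output)
next_member = parsed["parameters"]["next"]           # "FINISH" ends the conversation
tool_call = {"name": "vqa_diagram", "args": {"next": next_member}, "id": "1", "type": "tool_call"}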
144 |
+
|
145 |
+
#Create Safeguarding agent
|
146 |
+
def create_safeguarding_agent(llm, system_message: str, question: str, raw_image: str):
|
147 |
+
"""Create an LLM-based safeguarding checker."""
|
148 |
+
# LLM with function call
|
149 |
+
structured_llm_safeguard = llm.with_structured_output(DiagramV2S)
|
150 |
+
|
151 |
+
human_message = HumanMessage(content=[
|
152 |
+
{"type": "text", "text": f"{question}"},
|
153 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{raw_image}"}},
|
154 |
+
])
|
155 |
+
|
156 |
+
safeguard_prompt = ChatPromptTemplate.from_messages(
|
157 |
+
[
|
158 |
+
human_message,
|
159 |
+
("system", system_message),
|
160 |
+
]
|
161 |
+
)
|
162 |
+
|
163 |
+
diagram_safeguard = safeguard_prompt | structured_llm_safeguard
|
164 |
+
|
165 |
+
return diagram_safeguard
|
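A minimal, standalone sketch of the structured-output pattern used by the safeguarding agent, assuming an OpenAI-compatible chat model; the model name and schema below are illustrative.

from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI

class DiagramCheck(BaseModel):
    isArchitectureImage: bool = Field(description="True if the image is an architecture diagram or flowchart")
    isSafe: bool = Field(description="True if the image and question are safe to process")
    description: str = Field(description="Short image title")

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
checker = llm.with_structured_output(DiagramCheck)
# checker.invoke([...]) returns a DiagramCheck instance populated by the model.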
166 |
+
|
167 |
+
#Safeguard custom parse
|
168 |
+
def safeguard_check(state:OverallState, config:RunnableConfig) -> dict:
|
169 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
170 |
+
llm = get_llm_client(configuration.supervisor_model)
|
171 |
+
|
172 |
+
#'raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
|
173 |
+
question = getattr(state, "question", "Describe the image")
|
174 |
+
raw_image = get_raw_image(getattr(state,'image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))
|
175 |
+
|
176 |
+
system_message = """You are an expert on identifying images as architecture diagrams, flowchart and whether they are innapropriate content to be processed.
|
177 |
+
Given the conversation above, is the image safe to be processed? Does the image represent an architecture diagram or flowchart?
|
178 |
+
"""
|
179 |
+
|
180 |
+
safeguard_checker = create_safeguarding_agent(
|
181 |
+
llm,
|
182 |
+
system_message,
|
183 |
+
question,
|
184 |
+
raw_image
|
185 |
+
)
|
186 |
+
|
187 |
+
input = {"question": question, "raw_image": raw_image}
|
188 |
+
result = safeguard_checker.invoke(input=input, config=config)
|
189 |
+
|
190 |
+
# Parse out the function call
|
191 |
+
architecture_image = result['isArchitectureImage']
|
192 |
+
safe_request = result['isSafe']
|
193 |
+
description = result['description']
|
194 |
+
|
195 |
+
if architecture_image and safe_request:
|
196 |
+
print("---ROUTE REQUEST TO DIAGRAM SUPERVISOR---")
|
197 |
+
print(f"Architecture Diagram: {architecture_image} --- isSafe: {safe_request} --- {question} --- {description}")
|
198 |
+
result = True
|
199 |
+
else:
|
200 |
+
print("---ROUTE REQUEST TO FINISH---")
|
201 |
+
print(f"Architecture Diagram: {architecture_image} --- isSafe: {safe_request} -- What is it? {description}")
|
202 |
+
result = False
|
203 |
+
|
204 |
+
return {'safety_status': result}
|
205 |
+
|
206 |
+
def call_finish(state):
|
207 |
+
return {
|
208 |
+
"state": state,
|
209 |
+
"messages": [],
|
210 |
+
"next": "end",
|
211 |
+
}
|
212 |
+
|
213 |
+
def make_supervisor_node(model: BaseChatModel, members: list[str]) -> RunnableLambda:
|
214 |
+
options = ["FINISH"] + members
|
215 |
+
|
216 |
+
system_prompt = (
|
217 |
+
"You are an enterprise architecture team supervisor tasked to manage a conversation between the following members: "
|
218 |
+
"[diagram_description, diagram_object, diagram_improvement, diagram_risk]. "
|
219 |
+
"Given the user request, use the function below to respond with team member to act next. "
|
220 |
+
" If none of team member can be used, select 'FINISH'."
|
221 |
+
)
|
222 |
+
|
223 |
+
class Router(TypedDict):
|
224 |
+
"""Worker to route to next. If no workers needed, route to FINISH."""
|
225 |
+
next: Literal['FINISH', 'diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk']
|
226 |
+
|
227 |
+
async def supervisor_node(state: OverallState, config: RunnableConfig) -> dict | AgentFinish:
|
228 |
+
|
229 |
+
"""An LLM-based router."""
|
230 |
+
messages = [
|
231 |
+
{"role": "system", "content": system_prompt},
|
232 |
+
] + getattr(state, 'messages')
|
233 |
+
|
234 |
+
response = await model.with_structured_output(Router, include_raw=True).ainvoke(messages, config=config)
|
235 |
+
|
236 |
+
if isinstance(response, dict):
|
237 |
+
if response['parsed']['next'] == "FINISH":
|
238 |
+
return AgentFinish(return_values={"output": response['raw']}, log=response['raw']['content'])
|
239 |
+
|
240 |
+
# If the DiagramTagging function was selected, return to the user with the function inputs
|
241 |
+
tool_call = {"name": "vqa_diagram", "args": {"next": response['parsed']['next']}, "id": "1", "type": "tool_call"}
|
242 |
+
|
243 |
+
return {
|
244 |
+
"messages": [AIMessage("", tool_calls=[tool_call])],
|
245 |
+
"next": response['parsed']['next'],
|
246 |
+
}
|
247 |
+
else:
|
248 |
+
return AgentFinish(return_values={"output": response}, log=str(response))
|
249 |
+
|
250 |
+
return RunnableLambda(supervisor_node)
|
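For clarity, a small sketch of the dictionary shape returned by with_structured_output(..., include_raw=True) that supervisor_node unpacks; the values are illustrative.

from typing_extensions import Literal, TypedDict

class RouterExample(TypedDict):
    next: Literal["FINISH", "diagram_description", "diagram_object", "diagram_improvement", "diagram_risk"]

# include_raw=True wraps the result with the raw model message and any parsing error.
example_response = {
    "raw": None,                                   # would be the AIMessage from the model
    "parsed": RouterExample(next="diagram_description"),
    "parsing_error": None,
}
assert example_response["parsed"]["next"] == "diagram_description"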
251 |
+
|
252 |
+
#Create team supervisor
|
253 |
+
def create_team_supervisor(state:OverallState, config:RunnableConfig) -> RunnableSerializable: #Adding the parameter config:RunnableConfig causing Channel error
|
254 |
+
"""An LLM-based router."""
|
255 |
+
|
256 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
257 |
+
llm = get_llm_client(
|
258 |
+
configuration.vqa_model,
|
259 |
+
api_base_url=configuration.api_base_url,
|
260 |
+
)
|
261 |
+
|
262 |
+
# Supervisor Tool Prompts
|
263 |
+
system_prompt = f"""
|
264 |
+
Environment: ipython
|
265 |
+
Cutting Knowledge Date: December 2023
|
266 |
+
Today Date: {_get_formatted_date()}
|
267 |
+
"""
|
268 |
+
|
269 |
+
user_prompt = """
|
270 |
+
You are an enterprise architecture team supervisor tasked with managing a conversation between the following members:
|
271 |
+
["diagram_description", "diagram_object", "diagram_improvement", "diagram_risk"].
|
272 |
+
Given the user request, use the function below to respond with the team member that should act next.
|
273 |
+
If no team member applies, select "FINISH".
|
274 |
+
Function (in JSON format):
|
275 |
+
{
|
276 |
+
"type": "function", "function": {
|
277 |
+
"name": "route",
|
278 |
+
"description": "Select one of the available tools that should be used next.",
|
279 |
+
"parameters": {
|
280 |
+
"title": "routeSchema",
|
281 |
+
"type": "object",
|
282 |
+
"properties": {
|
283 |
+
"next": {
|
284 |
+
"title": "Next",
|
285 |
+
"anyOf": [
|
286 |
+
{"enum": ["FINISH", "diagram_description", "diagram_object", "diagram_improvement", "diagram_risk"]},
|
287 |
+
],
|
288 |
+
},
|
289 |
+
},
|
290 |
+
"required": ["next"],
|
291 |
+
},
|
292 |
+
}
|
293 |
+
}
|
294 |
+
"""
|
295 |
+
|
296 |
+
messages = [
|
297 |
+
("system", system_prompt),
|
298 |
+
("human", "{user_prompt}"),
|
299 |
+
("human", "{question}"),
|
300 |
+
]
|
301 |
+
|
302 |
+
prompt = ChatPromptTemplate.from_messages(messages).partial(
|
303 |
+
user_prompt=user_prompt)
|
304 |
+
|
305 |
+
llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, {"question":state.question}))
|
306 |
+
|
307 |
+
supervisor_agent = (
|
308 |
+
prompt |
|
309 |
+
llm |
|
310 |
+
parse
|
311 |
+
)
|
312 |
+
|
313 |
+
return supervisor_agent
|
314 |
+
|
315 |
+
# The following functions interoperate between the top level graph state
|
316 |
+
# and the state of the research sub-graph
|
317 |
+
# this makes it so that the states of each graph don't get intermixed
|
318 |
+
def enter_graph(state:OverallState, config:RunnableConfig) -> Command[Literal['safeguard_check']]:
|
319 |
+
|
320 |
+
configuration = AgentConfiguration.from_runnable_config(config)
|
321 |
+
|
322 |
+
messages = [
|
323 |
+
HumanMessage(content=state.question) #messages[-1]['content']),
|
324 |
+
]
|
325 |
+
|
326 |
+
#if not configuration.ea4all_ask_human == "interrupt":
|
327 |
+
# raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
|
328 |
+
#else:
|
329 |
+
# image = getattr(state,'image', "")
|
330 |
+
# raw_image = image if image else _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
|
331 |
+
|
332 |
+
image = getattr(state,'image', None)
|
333 |
+
if image:
|
334 |
+
raw_image = state.image #['image_url']['url'].split(',')[1]
|
335 |
+
else:
|
336 |
+
raw_image = _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
|
337 |
+
|
338 |
+
return Command(
|
339 |
+
update={
|
340 |
+
"messages": messages,
|
341 |
+
"question": state.question, #messages[-1].content,
|
342 |
+
"image": raw_image
|
343 |
+
},
|
344 |
+
goto='safeguard_check',
|
345 |
+
)
|
346 |
+
|
347 |
+
return {
|
348 |
+
"messages": state.messages,
|
349 |
+
"question": messages[-1].content,
|
350 |
+
"image": raw_image,
|
351 |
+
}
|
352 |
+
|
353 |
+
async def choose_next(state: OverallState):
|
354 |
+
return "diagram_supervisor" if state.safety_status else "final"
|
355 |
+
|
356 |
+
def build_vqa_graph():
|
357 |
+
model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
|
358 |
+
teams_supervisor_node = make_supervisor_node(model, ['diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk'])
|
359 |
+
|
360 |
+
workflow = StateGraph(OverallState, input=InputState, output=OutputState,config_schema=AgentConfiguration) #input=InputState
|
361 |
+
|
362 |
+
#Setup Graph nodes
|
363 |
+
#Node name CANNOT have blank space - pattern: '^[a-zA-Z0-9_-]+$'
|
364 |
+
workflow.add_node("start", enter_graph)
|
365 |
+
workflow.add_node("safeguard_check", safeguard_check)
|
366 |
+
workflow.add_node("diagram_supervisor", teams_supervisor_node)
|
367 |
+
workflow.add_node("final", call_finish)
|
368 |
+
|
369 |
+
tool_node = ToolNode([vqa_diagram])
|
370 |
+
workflow.add_node("tools", tool_node)
|
371 |
+
|
372 |
+
#Setup graph edges
|
373 |
+
#Graph entry point
|
374 |
+
workflow.add_edge(START, "start")
|
375 |
+
workflow.add_edge("start", "safeguard_check")
|
376 |
+
|
377 |
+
workflow.add_conditional_edges(
|
378 |
+
"safeguard_check",
|
379 |
+
choose_next,
|
380 |
+
{
|
381 |
+
"diagram_supervisor": "diagram_supervisor",
|
382 |
+
"final": "final",
|
383 |
+
}
|
384 |
+
)
|
385 |
+
|
386 |
+
workflow.add_conditional_edges(
|
387 |
+
"diagram_supervisor",
|
388 |
+
tools_condition,
|
389 |
+
#calls one of our tools. END causes the graph to terminate (and respond to the user)
|
390 |
+
{
|
391 |
+
"tools": "tools",
|
392 |
+
END: END,
|
393 |
+
}
|
394 |
+
)
|
395 |
+
|
396 |
+
workflow.add_edge("final", END)
|
397 |
+
workflow.add_edge("tools", END)
|
398 |
+
|
399 |
+
#memory = MemorySaver()
|
400 |
+
diagram_graph = workflow.compile() #checkpointer=memory)
|
401 |
+
diagram_graph.name = "DiagramGraph"
|
402 |
+
|
403 |
+
return diagram_graph
|
404 |
+
|
405 |
+
diagram_graph = build_vqa_graph()
|
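A hedged usage sketch for the compiled VQA graph; the question is a placeholder and the empty image string makes enter_graph fall back to the bundled multi-app-architecture.png.

import asyncio
from ea4all.src.ea4all_vqa.graph import diagram_graph

async def demo() -> None:
    result = await diagram_graph.ainvoke({"question": "Which applications are shown?", "image": ""})
    print(result)

# asyncio.run(demo())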
ea4all/src/ea4all_vqa/state.py
ADDED
@@ -0,0 +1,64 @@
1 |
+
"""State management for the VQA graph.
|
2 |
+
|
3 |
+
This module defines the state structures used in the VQA graph. It includes
|
4 |
+
definitions for the agent state and input state.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import operator
|
8 |
+
from dataclasses import dataclass
|
9 |
+
from typing import (
|
10 |
+
Optional,
|
11 |
+
Annotated,
|
12 |
+
Sequence,
|
13 |
+
)
|
14 |
+
|
15 |
+
from langchain_core.messages import (
|
16 |
+
BaseMessage,
|
17 |
+
)
|
18 |
+
|
19 |
+
from langgraph.graph import MessagesState
|
20 |
+
|
21 |
+
# Optional, the InputState is a restricted version of the State that is used to
|
22 |
+
# define a narrower interface to the outside world vs. what is maintained
|
23 |
+
# internally.
|
24 |
+
@dataclass(kw_only=True)
|
25 |
+
class InputState:
|
26 |
+
"""Represents the input state for the agent.
|
27 |
+
|
28 |
+
This class defines the structure of the input state, which includes
|
29 |
+
the messages exchanged between the user and the agent. It serves as
|
30 |
+
a restricted version of the full State, providing a narrower interface
|
31 |
+
to the outside world compared to what is maintained internally.
|
32 |
+
"""
|
33 |
+
|
34 |
+
"""Attributes:
|
35 |
+
question: user question
|
36 |
+
image: architecture diagram
|
37 |
+
"""
|
38 |
+
question: str
|
39 |
+
image: str
|
40 |
+
|
41 |
+
# The index state defines the simple IO for the single-node index graph
|
42 |
+
@dataclass(kw_only=True)
|
43 |
+
class OutputState:
|
44 |
+
"""Represents the output schema for the Diagram agent.
|
45 |
+
"""
|
46 |
+
|
47 |
+
answer: str
|
48 |
+
"""Answer to user's question about the Architectural Diagram."""
|
49 |
+
|
50 |
+
@dataclass(kw_only=True)
|
51 |
+
class OverallState(InputState):
|
52 |
+
"""Represents the overall state of the Diagram graph."""
|
53 |
+
|
54 |
+
"""Attributes:
|
55 |
+
messages: list of messages
|
56 |
+
safety_status: safety status of the diagram provided by the user
|
57 |
+
error: tool error
|
58 |
+
next: next tool to be called
|
59 |
+
"""
|
60 |
+
|
61 |
+
messages: Optional[Annotated[Sequence[BaseMessage], operator.add]] = None
|
62 |
+
safety_status: Optional[bool] = None
|
63 |
+
error: Optional[str] = None
|
64 |
+
next: Optional[str] = None
|
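A small illustration of how these dataclasses compose; the values are placeholders.

from ea4all.src.ea4all_vqa.state import InputState, OverallState

inp = InputState(question="Describe this diagram", image="<base64-encoded png>")
state = OverallState(question=inp.question, image=inp.image, safety_status=True, next="diagram_description")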
ea4all/src/graph.py
ADDED
@@ -0,0 +1,254 @@
1 |
+
"""Main file for constructing the EA4ALL hierarchical graph"""
|
2 |
+
|
3 |
+
"""
|
4 |
+
EA4ALL Hierarchical Graph
|
5 |
+
This module defines the main file for constructing the EA4ALL hierarchical graph. It contains functions and classes for creating and managing the graph structure.
|
6 |
+
Functions:
|
7 |
+
- make_supervisor_node: Creates a supervisor node for managing a conversation between architect workers.
|
8 |
+
- call_landscape_agentic: Calls the landscape agentic graph.
|
9 |
+
- call_diagram_agentic: Calls the diagram agentic graph.
|
10 |
+
- call_togaf_agentic: Calls the togaf agentic graph.
|
11 |
+
- websearch: Search for real-time data to answer user's question
|
12 |
+
Classes:
|
13 |
+
- Router: TypedDict representing the worker to route to next.
|
14 |
+
Attributes:
|
15 |
+
- model: The LLM client for the supervisor model.
|
16 |
+
- super_builder: The StateGraph builder for constructing the graph.
|
17 |
+
- super_graph: The compiled EA4ALL Agentic Workflow Graph.
|
18 |
+
Note: This module depends on other modules and packages such as langchain_core, langgraph, shared, ea4all_apm, ea4all_vqa, and ea4all_gra.
|
19 |
+
"""
|
20 |
+
|
21 |
+
"""Changelog:
|
22 |
+
- lanchain_openapi: 0.2.9 (0.3.6 issue with max_tokens for HF models)
|
23 |
+
#2025-06-03
|
24 |
+
- Refactored code to fix problems with linter and type checking (Standard mode)
|
25 |
+
"""
|
26 |
+
|
27 |
+
from langgraph.types import Command
|
28 |
+
from langchain_core.messages import (
|
29 |
+
HumanMessage,
|
30 |
+
AIMessage
|
31 |
+
)
|
32 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
33 |
+
from langchain_core.runnables import RunnableConfig
|
34 |
+
|
35 |
+
from langchain import hub
|
36 |
+
|
37 |
+
from langgraph.graph import (
|
38 |
+
START,
|
39 |
+
END,
|
40 |
+
StateGraph,
|
41 |
+
)
|
42 |
+
from langgraph.checkpoint.memory import MemorySaver
|
43 |
+
|
44 |
+
from typing_extensions import Literal, TypedDict
|
45 |
+
import uuid
|
46 |
+
|
47 |
+
from ea4all.src.shared.configuration import BaseConfiguration
|
48 |
+
from ea4all.src.shared.utils import get_llm_client
|
49 |
+
from ea4all.src.shared.state import State
|
50 |
+
from ea4all.src.tools.tools import websearch
|
51 |
+
|
52 |
+
from ea4all.src.ea4all_indexer.graph import indexer_graph
|
53 |
+
from ea4all.src.ea4all_apm.graph import apm_graph
|
54 |
+
from ea4all.src.ea4all_vqa.graph import diagram_graph
|
55 |
+
from ea4all.src.ea4all_gra.graph import togaf_graph
|
56 |
+
|
57 |
+
async def call_indexer_apm(state: State, config: RunnableConfig):
|
58 |
+
response = await indexer_graph.ainvoke(input={"docs":[]}, config=config)
|
59 |
+
|
60 |
+
def make_supervisor_node(model: BaseChatModel, members: list[str]):
|
61 |
+
options = ["FINISH"] + members
|
62 |
+
|
63 |
+
system_prompt = hub.pull("ea4all_super_graph").template
|
64 |
+
|
65 |
+
class Router(TypedDict):
|
66 |
+
"""Worker to route to next. If no workers needed, route to FINISH."""
|
67 |
+
next: Literal["FINISH", "portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]
|
68 |
+
|
69 |
+
async def supervisor_node(state: State, config: RunnableConfig) -> Command[Literal["portfolio_team", "diagram_team", "blueprint_team", "websearch_team", '__end__']]:
|
70 |
+
|
71 |
+
"""An LLM-based router."""
|
72 |
+
messages = [
|
73 |
+
{"role": "system", "content": system_prompt},
|
74 |
+
] + [state["messages"][-1]]
|
75 |
+
|
76 |
+
response = await model.with_structured_output(Router).ainvoke(messages, config=config)
|
77 |
+
|
78 |
+
_goto = "__end__"
|
79 |
+
|
80 |
+
if isinstance(response, dict):
|
81 |
+
_goto = response["next"]
|
82 |
+
# Ensure _goto is one of the allowed Literal values
|
83 |
+
if _goto not in ["portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]:
|
84 |
+
_goto = "__end__"
|
85 |
+
|
86 |
+
print(f"---Supervisor got a request--- Routing to {_goto} \n User Question: {state['messages'][-1].content}")
|
87 |
+
|
88 |
+
return Command(
|
89 |
+
#update={"next": _goto},
|
90 |
+
goto=_goto
|
91 |
+
)
|
92 |
+
|
93 |
+
return supervisor_node
|
94 |
+
|
95 |
+
async def call_landscape_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: ##2025-02-21: NOT passing CHAT MEMORY to the APM_graph
|
96 |
+
response = await apm_graph.ainvoke({"question": state["messages"][-1].content}, config=config)
|
97 |
+
return Command(
|
98 |
+
update={
|
99 |
+
"messages": [
|
100 |
+
AIMessage(
|
101 |
+
content=response.get('generation', response['safety_status']), name="landscape_agentic"
|
102 |
+
)
|
103 |
+
]
|
104 |
+
},
|
105 |
+
goto="__end__",
|
106 |
+
)
|
107 |
+
|
108 |
+
async def call_diagram_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: #NOT passing CHAT MEMORY to the Diagram_graph
|
109 |
+
|
110 |
+
inputs = {
|
111 |
+
"messages": [{"role": "user", "content": state.get('messages')[-1].content}],
|
112 |
+
"question": state['messages'][-1].content, "image":""
|
113 |
+
} #user response
|
114 |
+
|
115 |
+
response = await diagram_graph.ainvoke(
|
116 |
+
input=inputs,
|
117 |
+
config=config
|
118 |
+
)
|
119 |
+
|
120 |
+
return Command(
|
121 |
+
update={
|
122 |
+
"messages": [
|
123 |
+
AIMessage(
|
124 |
+
content=response['messages'][-1].content, name="landscape_agentic"
|
125 |
+
)
|
126 |
+
]
|
127 |
+
},
|
128 |
+
goto="__end__",
|
129 |
+
)
|
130 |
+
|
131 |
+
async def call_togaf_agentic(state: State, config: RunnableConfig) -> Command[Literal["__end__"]]: #NOT passing CHAT MEMORY to the Togaf_graph
|
132 |
+
print(f"---TOGAF ROUTE team node ready to --- CALL_TOGAF_AGENTIC Routing to {state['next']} with User Question: {state['messages'][-1].content}")
|
133 |
+
|
134 |
+
inputs = {"messages": [{"role": "user", "content": state.get('messages')[-1].content}]} #user response
|
135 |
+
|
136 |
+
response = await togaf_graph.ainvoke(
|
137 |
+
input=inputs,
|
138 |
+
config=config
|
139 |
+
) #astream not loading the graph
|
140 |
+
|
141 |
+
return Command(
|
142 |
+
update={
|
143 |
+
"messages": [
|
144 |
+
AIMessage(
|
145 |
+
content=response["messages"][-1].content, name="togaf_route"
|
146 |
+
)
|
147 |
+
]
|
148 |
+
},
|
149 |
+
goto="__end__",
|
150 |
+
)
|
151 |
+
|
152 |
+
# Wrap-up websearch answer to user's question
|
153 |
+
async def call_generate_websearch(state:State, config: RunnableConfig) -> Command[Literal["__end__"]]:
|
154 |
+
from ea4all.src.ea4all_apm.state import APMState
|
155 |
+
|
156 |
+
if config is not None:
|
157 |
+
source = config.get('metadata', {}).get('langgraph_node', 'unknown')
|
158 |
+
|
159 |
+
# Invoke GENERATOR node in the APMGraph
|
160 |
+
state_dict = {
|
161 |
+
"documents": state['messages'][-1].content,
|
162 |
+
"web_search": "Yes",
|
163 |
+
"question": state['messages'][-2].content,
|
164 |
+
"source": source
|
165 |
+
}
|
166 |
+
|
167 |
+
apm_state = APMState(**state_dict)
|
168 |
+
generation = await apm_graph.nodes["generate"].ainvoke(apm_state, config)
|
169 |
+
|
170 |
+
return Command(
|
171 |
+
update={
|
172 |
+
"messages": [
|
173 |
+
AIMessage(
|
174 |
+
content=generation['generation'], name="generate_websearch"
|
175 |
+
)
|
176 |
+
]
|
177 |
+
},
|
178 |
+
goto="__end__",
|
179 |
+
)
|
180 |
+
|
181 |
+
async def blueprint_team(state: State) -> Command[Literal["togaf_route"]]:
|
182 |
+
print("---Blueprint team got a request--- Routing to TOGAF_ROUTE node")
|
183 |
+
|
184 |
+
return Command(update={**state}, goto="togaf_route")
|
185 |
+
|
186 |
+
async def diagram_team(state: State) -> Command[Literal["diagram_route"]]:
|
187 |
+
print("---Diagram team got a request--- Routing to DIAGRAM_ROUTE node")
|
188 |
+
|
189 |
+
return Command(update={**state}, goto="diagram_route")
|
190 |
+
|
191 |
+
async def super_graph_entry_point(state: State):
|
192 |
+
# Generate a unique thread ID
|
193 |
+
thread_config = RunnableConfig({"configurable": {"thread_id": str(uuid.uuid4())}})
|
194 |
+
|
195 |
+
# Initialize state if not provided
|
196 |
+
if state is None:
|
197 |
+
state = {
|
198 |
+
"messages": [
|
199 |
+
("system", "You are a helpful assistant"),
|
200 |
+
("human", "Start the workflow")
|
201 |
+
]
|
202 |
+
}
|
203 |
+
|
204 |
+
# Build and compile the graph
|
205 |
+
graph = build_super_graph()
|
206 |
+
|
207 |
+
# Async invocation
|
208 |
+
try:
|
209 |
+
# Use ainvoke for async execution
|
210 |
+
result = await graph.ainvoke(state, config=RunnableConfig(thread_config))
|
211 |
+
return result
|
212 |
+
except Exception as e:
|
213 |
+
print(f"Graph execution error: {e}")
|
214 |
+
raise
|
215 |
+
|
216 |
+
# Define & build the graph.
|
217 |
+
def build_super_graph():
|
218 |
+
|
219 |
+
model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
|
220 |
+
teams_supervisor_node = make_supervisor_node(model, ["portfolio_team", "diagram_team", "blueprint_team","websearch_team"])
|
221 |
+
|
222 |
+
super_builder = StateGraph(State, config_schema=BaseConfiguration)
|
223 |
+
|
224 |
+
super_builder.add_node("apm_indexer", call_indexer_apm)
|
225 |
+
super_builder.add_node("supervisor", teams_supervisor_node)
|
226 |
+
super_builder.add_node("portfolio_team", call_landscape_agentic)
|
227 |
+
super_builder.add_node("websearch_team", websearch)
|
228 |
+
super_builder.add_node("diagram_team", diagram_team)
|
229 |
+
super_builder.add_node("blueprint_team", blueprint_team)
|
230 |
+
super_builder.add_node("generate_websearch", call_generate_websearch)
|
231 |
+
super_builder.add_node("diagram_route", call_diagram_agentic)
|
232 |
+
super_builder.add_node("togaf_route", call_togaf_agentic)
|
233 |
+
|
234 |
+
|
235 |
+
super_builder.add_edge(START, "apm_indexer")
|
236 |
+
super_builder.add_edge("apm_indexer", "supervisor")
|
237 |
+
|
238 |
+
super_builder.add_edge("websearch_team", "generate_websearch")
|
239 |
+
super_builder.add_edge("blueprint_team", "togaf_route")
|
240 |
+
super_builder.add_edge("diagram_team", "diagram_route")
|
241 |
+
|
242 |
+
super_builder.add_edge("portfolio_team", END)
|
243 |
+
super_builder.add_edge("generate_websearch", END)
|
244 |
+
super_builder.add_edge("togaf_route", END)
|
245 |
+
super_builder.add_edge("diagram_route", END)
|
246 |
+
|
247 |
+
#memory = MemorySaver() #With LangGraph API, in-memory persistence is handled directly by the platform
|
248 |
+
super_graph = super_builder.compile() #checkpointer=memory)
|
249 |
+
super_graph.name = "EA4ALL Agentic Workflow Graph"
|
250 |
+
|
251 |
+
return super_graph
|
252 |
+
|
253 |
+
# Export the graph for LangGraph Dev/Studio
|
254 |
+
super_graph = build_super_graph()
|
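A usage sketch for running the compiled hierarchical graph outside LangGraph Studio; the question is a placeholder.

import asyncio
from ea4all.src.graph import super_graph

async def main() -> None:
    result = await super_graph.ainvoke(
        {"messages": [("human", "Which applications support the CRM capability?")]}
    )
    print(result["messages"][-1].content)

# asyncio.run(main())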
ea4all/src/shared/__init__.py
ADDED
@@ -0,0 +1 @@
1 |
+
"""Shared utilities module."""
|
ea4all/src/shared/configuration.py
ADDED
@@ -0,0 +1,161 @@
1 |
+
"""Define the configurable parameters for the agent."""
|
2 |
+
|
3 |
+
from __future__ import annotations
|
4 |
+
|
5 |
+
import ast
|
6 |
+
from dataclasses import dataclass, field, fields
|
7 |
+
from typing import Annotated, Any, Optional, Type, TypeVar, Literal
|
8 |
+
|
9 |
+
from langchain_core.runnables import RunnableConfig, ensure_config
|
10 |
+
|
11 |
+
# This file contains sample APPLICATIONS to index
|
12 |
+
DEFAULT_APM_CATALOGUE = "APM-ea4all (test-split).xlsx"
|
13 |
+
|
14 |
+
@dataclass(kw_only=True)
|
15 |
+
class BaseConfiguration:
|
16 |
+
"""Configuration class for all Agents.
|
17 |
+
|
18 |
+
This class defines the parameters needed for configuring the indexing and
|
19 |
+
retrieval processes, including embedding model selection, retriever provider choice, and search parameters.
|
20 |
+
"""
|
21 |
+
|
22 |
+
supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
|
23 |
+
default="gpt-4o-mini",
|
24 |
+
metadata={
|
25 |
+
"description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
|
26 |
+
},
|
27 |
+
)
|
28 |
+
|
29 |
+
api_base_url: Annotated[str, {"__template_metadata__": {"kind": "hosting"}}] = field(
|
30 |
+
default="https://api-inference.huggingface.co/models/",
|
31 |
+
metadata={
|
32 |
+
"description": "The base url for models hosted on Hugging Face's model hub."
|
33 |
+
},
|
34 |
+
)
|
35 |
+
|
36 |
+
max_tokens: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
|
37 |
+
default=4096,
|
38 |
+
metadata={
|
39 |
+
"description": "The maximum number of tokens allowed for in general question and answer model."
|
40 |
+
},
|
41 |
+
)
|
42 |
+
|
43 |
+
temperature: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
|
44 |
+
default=0,
|
45 |
+
metadata={
|
46 |
+
"description": "The default tempature to infere the LLM."
|
47 |
+
},
|
48 |
+
)
|
49 |
+
|
50 |
+
streaming: Annotated[bool, {"__template_metadata__": {"kind": "llm"}}] = field(
|
51 |
+
default=True,
|
52 |
+
metadata={
|
53 |
+
"description": "Default streaming mode."
|
54 |
+
},
|
55 |
+
)
|
56 |
+
|
57 |
+
ea4all_images: str = field(
|
58 |
+
default="ea4all/images",
|
59 |
+
metadata={
|
60 |
+
"description": "Configuration for the EA4ALL images folder."
|
61 |
+
},
|
62 |
+
)
|
63 |
+
|
64 |
+
ea4all_store: Annotated[str, {"__template_metadata__": {"kind": "infra"}}] = field(
|
65 |
+
default="ea4all/ea4all_store",
|
66 |
+
metadata={
|
67 |
+
"description": "The EA4ALL folder for mock & demo content."
|
68 |
+
},
|
69 |
+
)
|
70 |
+
|
71 |
+
ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
|
72 |
+
default="interrupt", #"Frontend"
|
73 |
+
metadata={
|
74 |
+
"description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
|
75 |
+
},
|
76 |
+
)
|
77 |
+
|
78 |
+
ea4all_recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "graph"}}] = field(
|
79 |
+
default=25,
|
80 |
+
metadata={
|
81 |
+
"description": "Maximum recursion allowed for EA4ALL graphs."
|
82 |
+
},
|
83 |
+
)
|
84 |
+
|
85 |
+
# models
|
86 |
+
embedding_model: Annotated[str, {"__template_metadata__": {"kind": "embeddings"}}] = field(
|
87 |
+
default="openai/text-embedding-3-small",
|
88 |
+
metadata={
|
89 |
+
"description": "Name of the embedding model to use. Must be a valid embedding model name."
|
90 |
+
},
|
91 |
+
)
|
92 |
+
|
93 |
+
retriever_provider: Annotated[
|
94 |
+
Literal["faiss"],
|
95 |
+
{"__template_metadata__": {"kind": "retriever"}},
|
96 |
+
] = field(
|
97 |
+
default="faiss",
|
98 |
+
metadata={
|
99 |
+
"description": "The vector store provider to use for retrieval. Options are 'FAISS' at moment only."
|
100 |
+
},
|
101 |
+
)
|
102 |
+
|
103 |
+
apm_faiss: Annotated[str, {"__template_metadata__": {"kind": "infra"}}] = field(
|
104 |
+
default="apm_faiss_index",
|
105 |
+
metadata={
|
106 |
+
"description": "The EA4ALL APM default Vectorstore index name."
|
107 |
+
},
|
108 |
+
)
|
109 |
+
|
110 |
+
apm_catalogue: str = field(
|
111 |
+
default=DEFAULT_APM_CATALOGUE,
|
112 |
+
metadata={
|
113 |
+
"description": "The EA4ALL APM default Vectorstore index name."
|
114 |
+
},
|
115 |
+
)
|
116 |
+
|
117 |
+
search_kwargs: Annotated[str, {"__template_metadata__": {"kind": "retriever"}}] = field(
|
118 |
+
#default="{'k': 50, 'score_threshold': 0.8, 'filter': {'namespace':'ea4all_agent'}}",
|
119 |
+
default="{'k':10, 'fetch_k':50}",
|
120 |
+
metadata={
|
121 |
+
"description": "Additional keyword arguments to pass to the search function of the retriever."
|
122 |
+
}
|
123 |
+
)
|
124 |
+
|
125 |
+
def __post_init__(self):
|
126 |
+
# Convert search_kwargs from string to dictionary
|
127 |
+
try:
|
128 |
+
if isinstance(self.search_kwargs, str):
|
129 |
+
self.search_kwargs = ast.literal_eval(self.search_kwargs)
|
130 |
+
except (SyntaxError, ValueError):
|
131 |
+
# Fallback to an empty dict or log an error
|
132 |
+
self.search_kwargs = {}
|
133 |
+
print("Error parsing search_kwargs")
|
134 |
+
|
135 |
+
@classmethod
|
136 |
+
def from_runnable_config(
|
137 |
+
cls: Type[T], config: Optional[RunnableConfig] = None
|
138 |
+
) -> T:
|
139 |
+
"""Create an IndexConfiguration instance from a RunnableConfig object.
|
140 |
+
|
141 |
+
Args:
|
142 |
+
cls (Type[T]): The class itself.
|
143 |
+
config (Optional[RunnableConfig]): The configuration object to use.
|
144 |
+
|
145 |
+
Returns:
|
146 |
+
T: An instance of IndexConfiguration with the specified configuration.
|
147 |
+
"""
|
148 |
+
config = ensure_config(config)
|
149 |
+
configurable = config.get("configurable") or {}
|
150 |
+
_fields = {f.name for f in fields(cls) if f.init}
|
151 |
+
|
152 |
+
# Special handling for search_kwargs
|
153 |
+
if 'search_kwargs' in configurable and isinstance(configurable['search_kwargs'], str):
|
154 |
+
try:
|
155 |
+
configurable['search_kwargs'] = ast.literal_eval(configurable['search_kwargs'])
|
156 |
+
except (SyntaxError, ValueError):
|
157 |
+
configurable['search_kwargs'] = {}
|
158 |
+
|
159 |
+
return cls(**{k: v for k, v in configurable.items() if k in _fields})
|
160 |
+
|
161 |
+
T = TypeVar("T", bound=BaseConfiguration)
|
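A quick sketch of the search_kwargs round trip: the field arrives as a string and is parsed into a dict by from_runnable_config / __post_init__; the values are illustrative.

from langchain_core.runnables import RunnableConfig
from ea4all.src.shared.configuration import BaseConfiguration

cfg = BaseConfiguration.from_runnable_config(
    RunnableConfig(configurable={"search_kwargs": "{'k': 5, 'fetch_k': 20}"})
)
assert cfg.search_kwargs == {"k": 5, "fetch_k": 20}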
ea4all/src/shared/prompts.py
ADDED
@@ -0,0 +1,393 @@
1 |
+
import os
|
2 |
+
|
3 |
+
from langchain_core.prompts.chat import (
|
4 |
+
ChatPromptTemplate,
|
5 |
+
HumanMessagePromptTemplate,
|
6 |
+
SystemMessagePromptTemplate
|
7 |
+
)
|
8 |
+
|
9 |
+
from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
|
10 |
+
from langchain_core.prompts import MessagesPlaceholder, format_document
|
11 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
12 |
+
from langchain.chains.prompt_selector import ConditionalPromptSelector
|
13 |
+
|
14 |
+
from langchain_core.messages import (
|
15 |
+
HumanMessage,
|
16 |
+
)
|
17 |
+
|
18 |
+
from langchain_core.output_parsers import (
|
19 |
+
JsonOutputParser
|
20 |
+
)
|
21 |
+
|
22 |
+
from langsmith import (
|
23 |
+
traceable,
|
24 |
+
)
|
25 |
+
################################
|
26 |
+
##COLLECTION of prompt functions
|
27 |
+
################################
|
28 |
+
##Llama-3.1 Prompt Format
|
29 |
+
# Define the prompt format with special tokens
|
30 |
+
LLAMA31_CHAT_PROMPT_FORMAT = (
|
31 |
+
"<|begin_of_text|>"
|
32 |
+
"<|start_header_id|>system<|end_header_id|>{system_message}<|eot_id|>\n"
|
33 |
+
"<|start_header_id|>user<|end_header_id|>{human_message}<|eot_id|>\n"
|
34 |
+
"<|start_header_id|>ai<|end_header_id|>{ai_message}\n"
|
35 |
+
)
|
36 |
+
|
37 |
+
LLAMA31_PROMPT_FORMAT = (
|
38 |
+
"<|begin_of_text|>"
|
39 |
+
"<|start_header_id|>system<|end_header_id|>{system_message}<|eot_id|>\n"
|
40 |
+
"<|start_header_id|>user<|end_header_id|>{human_message}<|eot_id|>\n"
|
41 |
+
"<|start_header_id|>ai<|end_header_id|>{ai_message}\n"
|
42 |
+
)
|
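A one-line illustration of filling the raw Llama 3.1 template defined above; the messages are placeholders.

prompt_text = LLAMA31_PROMPT_FORMAT.format(
    system_message="You are a helpful enterprise architect assistant.",
    human_message="List the applications in this landscape.",
    ai_message="",
)
print(prompt_text)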
43 |
+
|
44 |
+
##return a prompt-template class with informed user inquiry
|
45 |
+
def ea4all_prompt(query):
|
46 |
+
prompt_template = PromptTemplate(
|
47 |
+
input_variables=["query", "answer"],
|
48 |
+
template=TEMPLATE_QUERY_ANSWER)
|
49 |
+
|
50 |
+
prompt = prompt_template.format(
|
51 |
+
user_question=query,
|
52 |
+
answer="")
|
53 |
+
|
54 |
+
return prompt
|
55 |
+
|
56 |
+
##return a chat-prompt-template class from the informed template
|
57 |
+
def ea4all_chat_prompt(template):
|
58 |
+
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
|
59 |
+
human_template = "{user_question}"
|
60 |
+
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
|
61 |
+
|
62 |
+
ea4all_prompt = ChatPromptTemplate.from_messages(
|
63 |
+
messages=[
|
64 |
+
system_message_prompt,
|
65 |
+
## MessagesPlaceholder(variable_name="history"),
|
66 |
+
human_message_prompt],
|
67 |
+
)
|
68 |
+
ea4all_prompt.output_parser=JsonOutputParser()
|
69 |
+
|
70 |
+
return ea4all_prompt
|
71 |
+
|
72 |
+
##select best prompt based on user inquiry's category
|
73 |
+
@traceable(
|
74 |
+
tags={os.environ["EA4ALL_ENV"]}
|
75 |
+
)
|
76 |
+
def ea4ll_prompt_selector(category):
|
77 |
+
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
|
78 |
+
default_prompt = ea4all_chat_prompt(GENERAL_TEMPLATE),
|
79 |
+
conditionals=[
|
80 |
+
(lambda category: True if category == "Strategy" else False, ea4all_chat_prompt(STRATEGY_TEMPLATE)),
|
81 |
+
(lambda category: True if category == "Application" else False, ea4all_chat_prompt(APPLICATION_TEMPLATE)),
|
82 |
+
(lambda category: True if category == "Recommendation" else False, ea4all_chat_prompt(RECOMMENDATION_TEMPLATE)),
|
83 |
+
(lambda category: True if category not in ("Strategy","Application", "Recommendation") else False, ea4all_chat_prompt(GENERAL_TEMPLATE))
|
84 |
+
]
|
85 |
+
)
|
86 |
+
|
87 |
+
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(category)
|
88 |
+
|
89 |
+
return(prompt)
|
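A usage sketch for the selector: it returns a ChatPromptTemplate whose templates expect {user_question} and {cdocs}; the inputs below are placeholders.

prompt = ea4ll_prompt_selector("Recommendation")
messages = prompt.format_messages(
    user_question="Which applications could be rationalised?",
    cdocs="Application A: low business fit, roadmap=divest; Application B: overlapping capability",
)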
90 |
+
|
91 |
+
|
92 |
+
#######################
|
93 |
+
##COLLECTION of prompts
|
94 |
+
#######################
|
95 |
+
|
96 |
+
##Template-basic instruction + context
|
97 |
+
TEMPLATE_CONTEXT = """You are a helpful Enterprise Architect with knowledge on enterprises IT landscapes.
|
98 |
+
Use only the context delimited by triple backticks to answer questions. Return the answer formatted as a text paragraph.
|
99 |
+
If you don't know the answer, return "I could not find the information."
|
100 |
+
Don't make up the response.
|
101 |
+
Context: ```{cdocs}```
|
102 |
+
Help answer: ""
|
103 |
+
"""
|
104 |
+
|
105 |
+
##Template-basic instruction + question + answer
|
106 |
+
TEMPLATE_QUERY_ANSWER = """You are Enterprise Architect highly knowledgable on IT landscape. \
|
107 |
+
Answer the question that is delimited by triple backticks in the style of a bullet list. \
|
108 |
+
If the question cannot be answered using the information provided answer with "I don't know". \
|
109 |
+
|
110 |
+
Always say "thanks for asking!" at the end of the answer.
|
111 |
+
|
112 |
+
Question: ```{user_question}```
|
113 |
+
Answer: {answer}
|
114 |
+
"""
|
115 |
+
|
116 |
+
TEMPLATE_APM_QNA_ROUTING = """application portfolio assessment, application/IT landscape rationalisation, simplification or optimisation, business capability assessment, line of business landscape, who can I talk to, assistance from architecture team."""
|
117 |
+
|
118 |
+
##Template-break-into-simpler-tasks
|
119 |
+
#https://platform.openai.com/docs/guides/prompt-engineering/strategy-split-complex-tasks-into-simpler-subtasks
|
120 |
+
TEMPLATE_HEADER = """You are a helpful enterprise architect assistant. """
|
121 |
+
TEMPLATE_HEADER += """Your goal is to provide accurate and detailed responses to user inquiry. """
|
122 |
+
TEMPLATE_HEADER += """You have access to a vast amount of enterprise architecture knowledge, """
|
123 |
+
TEMPLATE_HEADER += """and you can understand and generate language fluently. """
|
124 |
+
TEMPLATE_HEADER += """You can assist with a wide range of architectural topics, including but not limited to """
|
125 |
+
TEMPLATE_HEADER += """business, application, data and technology architectures. """
|
126 |
+
TEMPLATE_HEADER += """You should always strive to promote a positive and respectful conversation.
|
127 |
+
"""
|
128 |
+
|
129 |
+
TEMPLATE_TASKS = ""
|
130 |
+
TEMPLATE_TASKS += """You will be provided with a user inquiry. """
|
131 |
+
TEMPLATE_TASKS += """Classify the inquiry into primary category and secondary category. """
|
132 |
+
TEMPLATE_TASKS += """Primary categories: Strategy, Application, Recommendation or General Inquiry. """
|
133 |
+
TEMPLATE_TASKS += """Strategy secondary categories:
|
134 |
+
- Architecture and Technology Strategy
|
135 |
+
- Vision
|
136 |
+
- Architecture Principles
|
137 |
+
"""
|
138 |
+
TEMPLATE_TASKS += """Application secondary categories:
|
139 |
+
- Meet business and technical need
|
140 |
+
- Business criticality
|
141 |
+
- Roadmap
|
142 |
+
- Business Capability
|
143 |
+
- Hosting
|
144 |
+
"""
|
145 |
+
TEMPLATE_TASKS += """Recommendation secondary categories:
|
146 |
+
- Application rationalisation
|
147 |
+
- Landscape simplification
|
148 |
+
- Reuse existent invested application
|
149 |
+
- Business capability with overlapping applications
|
150 |
+
- Opportunities and innovation
|
151 |
+
"""
|
152 |
+
TEMPLATE_TASKS += """General inquiry:
|
153 |
+
- Speak to an architect
|
154 |
+
"""
|
155 |
+
TEMPLATE_TASKS += """You may also revise the original inquiry if you think that revising \
|
156 |
+
it will ultimately lead to a better response from the language model """
|
157 |
+
TEMPLATE_TASKS += """Provide your output in JSON format with the keys: primary, secondary, question.
|
158 |
+
"""
|
159 |
+
|
160 |
+
#Template-break-into-specific-prompt-by-category
|
161 |
+
strategy_template = """You will be provided with inquiry about architecture strategy.
|
162 |
+
Follow these steps to answer user inquiry:
|
163 |
+
STEP 1 - Using only the context delimited by triple backticks.
|
164 |
+
STEP 2 - Look at application with roadmap to invest.
|
165 |
+
STEP 3 - Extract the information that is only relevant to help answer the user inquiry
|
166 |
+
"""
|
167 |
+
|
168 |
+
application_template = """You will be provided with an inquiry about application architecture.
|
169 |
+
Follow these steps to answer user inquiry:
|
170 |
+
STEP 1 - Using only the context delimited by triple backticks.
|
171 |
+
STEP 2 - Extract the information that is only relevant to help answer the user inquiry
|
172 |
+
"""
|
173 |
+
|
174 |
+
recommendation_template = """You will be provided with enterprise architecture inquiry that needs a recommendation.
|
175 |
+
Follow these steps to answer user inquiry:
|
176 |
+
STEP 1 - Use only the context delimited by triple backticks.
|
177 |
+
STEP 2 - Look at applications with low business or technical fit
|
178 |
+
STEP 3 - Look at applications with a roadmap different from invest
|
179 |
+
STEP 4 - Look at applications hosted on premise
|
180 |
+
STEP 5 - Look at Business capability with overlapping applications
|
181 |
+
"""
|
182 |
+
|
183 |
+
general_template = """You will provided with a general inquiry about enterprise architecture IT landscape.
|
184 |
+
Follow these steps to answer user queries:
|
185 |
+
STEP 1 - use only the context delimited by triple backticks
|
186 |
+
STEP 2 - Extract the information that is only relevant to help answer the user inquiry
|
187 |
+
"""
|
188 |
+
|
189 |
+
default_template = """
|
190 |
+
FINAL STEP - Do not make up or guess ANY extra information. \
|
191 |
+
Ask a follow-up question to the user if you need further clarification to understand and answer their inquiry. \
|
192 |
+
After a follow-up question, if you still don't know the answer or cannot find the specific information needed to answer the user inquiry, \
|
193 |
+
return I could not find the information. \
|
194 |
+
Ensure that the response contains all the relevant context needed to interpret it -
|
195 |
+
in other words don't extract small snippets that are missing important context.
|
196 |
+
Format the output as a string with the most appropriate style to make it clear, concise and user-friendly for a chatbot response.
|
197 |
+
Here is the question: {user_question}
|
198 |
+
Here is the context: ```{cdocs}```
|
199 |
+
"""
|
200 |
+
STRATEGY_TEMPLATE = TEMPLATE_HEADER + strategy_template + default_template
|
201 |
+
APPLICATION_TEMPLATE = TEMPLATE_HEADER + application_template + default_template
|
202 |
+
RECOMMENDATION_TEMPLATE = TEMPLATE_HEADER + recommendation_template + default_template
|
203 |
+
GENERAL_TEMPLATE = TEMPLATE_HEADER + general_template + default_template
|
204 |
+
|
205 |
+
|
206 |
+
###############################################
|
207 |
+
##COLLECTION of prompts for conversation memory
|
208 |
+
###############################################
|
209 |
+
|
210 |
+
_template = """Given the following conversation and a follow up question,\
|
211 |
+
rephrase the follow up question to be a standalone question, in its original language.\
|
212 |
+
Chat History:
|
213 |
+
{chat_history}
|
214 |
+
Follow Up Input: {user_question}
|
215 |
+
Standalone question:"""
|
216 |
+
|
217 |
+
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
|
218 |
+
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
|
219 |
+
|
220 |
+
|
221 |
+
def _combine_documents(
|
222 |
+
docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
|
223 |
+
):
|
224 |
+
doc_strings = [format_document(doc, document_prompt) for doc in docs]
|
225 |
+
|
226 |
+
return document_separator.join(doc_strings)
|
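A small usage sketch for _combine_documents; the documents are illustrative.

from langchain_core.documents import Document

docs = [
    Document(page_content="Application A supports the billing capability."),
    Document(page_content="Application B is hosted on premise."),
]
print(_combine_documents(docs))   # page contents joined with a blank line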
227 |
+
|
228 |
+
|
229 |
+
##################################################
|
230 |
+
##COLLECTION of prompts - RAG query transformation
|
231 |
+
##################################################
|
232 |
+
## Multi Query
|
233 |
+
# Prompt
|
234 |
+
multiquery_template = """You are an AI Enterprise Architect language model assistant. Your task is to generate five
|
235 |
+
different versions of the given user question to retrieve relevant documents from a vector
|
236 |
+
database. By generating multiple perspectives on the user question, your goal is to help
|
237 |
+
the user overcome some of the limitations of the distance-based similarity search.
|
238 |
+
Provide these alternative questions separated by newlines. Original question: {standalone_question}"""
|
239 |
+
|
240 |
+
decomposition_template = """You are a helpful enterprise architect assistant that generates multiple sub-questions related to an input question. \n
|
241 |
+
The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
|
242 |
+
Generate multiple search queries related to: {user_question} \n
|
243 |
+
Output (3 queries):"""
|
244 |
+
|
245 |
+
decomposition_answer_recursevely_template = """
|
246 |
+
Here is the question you need to answer:
|
247 |
+
|
248 |
+
\n --- \n {question} \n --- \n
|
249 |
+
|
250 |
+
Here is any available background question + answer pairs:
|
251 |
+
|
252 |
+
\n --- \n {q_a_pairs} \n --- \n
|
253 |
+
|
254 |
+
Here is additional context relevant to the question:
|
255 |
+
|
256 |
+
\n --- \n {context} \n --- \n
|
257 |
+
|
258 |
+
Use the above context and any background question + answer pairs to answer the question: \n {user_question}
|
259 |
+
"""
|
260 |
+
|
261 |
+
rag_fusion_questions_template = """You are a helpful enterprise architect assistant that generates multiple search queries based on a single input query. \n
|
262 |
+
Generate multiple search queries related to: {standalone_question} \n
|
263 |
+
Output (4 queries):"""
|
264 |
+
|
265 |
+
# Few Shot Examples
|
266 |
+
few_shot_step_back_examples = [
|
267 |
+
{
|
268 |
+
"input": "Could the members of The Police perform lawful arrests?",
|
269 |
+
"output": "what can the members of The Police do?",
|
270 |
+
},
|
271 |
+
{
|
272 |
+
"input": "Jan Sindel was born in what country?",
|
273 |
+
"output": "what is Jan Sindel personal history?",
|
274 |
+
},
|
275 |
+
]
|
276 |
+
# We now transform these to example messages
|
277 |
+
few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
|
278 |
+
[
|
279 |
+
("human", "{input}"),
|
280 |
+
("ai", "{output}"),
|
281 |
+
]
|
282 |
+
)
|
283 |
+
few_shot_prompt = FewShotChatMessagePromptTemplate(
|
284 |
+
input_variables=["standalone_question"],
|
285 |
+
example_prompt=few_shot_step_back_examples_prompt,
|
286 |
+
examples=few_shot_step_back_examples,
|
287 |
+
)
|
288 |
+
few_shot_step_back_prompt = ChatPromptTemplate.from_messages(
|
289 |
+
[
|
290 |
+
(
|
291 |
+
"system",
|
292 |
+
"""You are an expert at enterprise architecture world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
|
293 |
+
),
|
294 |
+
# Few shot examples
|
295 |
+
few_shot_prompt,
|
296 |
+
# New question
|
297 |
+
("user", "{standalone_question}"),
|
298 |
+
]
|
299 |
+
)
|
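A brief sketch of rendering the step-back prompt; the question is a placeholder.

messages = few_shot_step_back_prompt.format_messages(
    standalone_question="Which applications in the landscape handle payments?"
)
# -> [SystemMessage, HumanMessage/AIMessage few-shot pairs, HumanMessage with the new question]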
300 |
+
# Response prompt
|
301 |
+
step_back_response_prompt_template = """You are an expert of enterprise architecture world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
|
302 |
+
|
303 |
+
# {normal_context}
|
304 |
+
# {step_back_context}
|
305 |
+
|
306 |
+
# Original Question: {standalone_question}
|
307 |
+
"""
|
308 |
+
|
309 |
+
# HyDE document generation
|
310 |
+
hyde_template = """Please write an architecture scientific passage to answer the question
|
311 |
+
Question: {standalone_question}
|
312 |
+
Passage:"""
|
313 |
+
|
314 |
+
##################################################
|
315 |
+
##COLLECTION of prompts - Agentic Workflows
|
316 |
+
##################################################
|
317 |
+
#Agent system prompt
|
318 |
+
#System prompt embedded into human prompt
|
319 |
+
awqa_human_message = HumanMessage(content=[
|
320 |
+
{"type": "text", "text": "{user_question}"},
|
321 |
+
{"type": "text", "text": "You are a helpful AI assistant, collaborating with other assistants."},
|
322 |
+
{"type": "text", "text": "{system_message}"},
|
323 |
+
{"type": "text", "text": " Use the provided tools to progress towards answering the question."},
|
324 |
+
{"type": "text", "text": " You have access to the following tools: {tool_names}."},
|
325 |
+
])
|
326 |
+
|
327 |
+
awqa_template = ChatPromptTemplate.from_messages(
|
328 |
+
[
|
329 |
+
(
|
330 |
+
"human",
|
331 |
+
"You are a helpful AI assistant, collaborating with other assistants."
|
332 |
+
"{system_message}"
|
333 |
+
" Use the provided tools to progress towards answering the question: {user_question}"
|
334 |
+
" You have access to the following tools: {tool_names}."
|
335 |
+
),
|
336 |
+
MessagesPlaceholder(variable_name="messages"),
|
337 |
+
]
|
338 |
+
)
|
339 |
+
|
340 |
+
#DiagramV2T
|
341 |
+
diagramV2T_question = "How this architecture solution meets quality standards and alignment with architectural best practices?"
|
342 |
+
diagramV2T_template = """An image will be passed to you. Please explain how it meets quality standards and alignment with architecture best practices."""
|
343 |
+
agent_diagram_v2t_system_message = diagramV2T_template
|
344 |
+
|
345 |
+
#DiagramType
|
346 |
+
diagram_type_question = "What is this diagram type? Is a flowchart, C4, sequence-diagram, data flow or any other?"
|
347 |
+
diagramType_template = """An image will be passed to you. Identify the type of architecture diagram this image is.
|
348 |
+
For example, flowchart, C4, sequence flow, data flow, or other.
|
349 |
+
|
350 |
+
If a type of diagram is not identified that's fine! Just return that it was not possible to identify the architectural diagram style in this image.
|
351 |
+
|
352 |
+
Do not make up or guess ANY extra information. Only extract the exact diagram type from the image.
|
353 |
+
"""
|
354 |
+
|
355 |
+
agent_diagram_type_system_message = diagramType_template
|
356 |
+
|
357 |
+
#DiagramComponents
|
358 |
+
diagram_component_question = "Please list all components that are part of this current solution architecture"
|
359 |
+
diagramComponent_template = """An image will be passed to you. Extract from it all components identified in this image.
|
360 |
+
For example, application, software, connector, relationship, user, name, microservice, middeware, container or other.
|
361 |
+
|
362 |
+
If no components are identified that's fine - you don't need to extract any! Just return an empty list.
|
363 |
+
|
364 |
+
Do not make up or guess ANY extra information. Only extract what exactly is in the images.
|
365 |
+
"""
|
366 |
+
|
367 |
+
agent_diagram_components_system_message = diagramComponent_template
|
368 |
+
|
369 |
+
#DiagramRiskVulnerabilityMitigation
|
370 |
+
diagram_risk_question = "What are the potential risks and vulnerabilities in this current solution architecture, and how can we mitigate them?"
|
371 |
+
diagramRVM_template = """An image will be passed to you. Extract from it potential risks and vulnerabilities along with mitigation strategy in current solution architecture.
|
372 |
+
|
373 |
+
For example, risk: SQL injection, description: application A connected to MySQL database, mitigation: Use prepared
|
374 |
+
statements and parameterised queries to handle user input. Also, implement input validation and sanitisation to prevent malicious input from being processed.
|
375 |
+
|
376 |
+
If no risks, vulnerabilities or mitigation strategy are identified that's fine - you don't need to extract any! Just return an empty list.
|
377 |
+
|
378 |
+
Do not make up or guess ANY extra information. Only extract what exactly is in the image.
|
379 |
+
"""
|
380 |
+
|
381 |
+
agent_diagram_rvm_system_message = diagramRVM_template
|
382 |
+
|
383 |
+
#DiagramPatternsStandardsBestPractices
|
384 |
+
diagram_pattern_question = "Please describe well-architected patterns, standards and best practices that can be applied to the current solution architecture."
|
385 |
+
diagramPSBP_template = """An image will be passed to you.
|
386 |
+
List well-architected standards, patterns or best-practices that can be applied to the current solution architecture.
|
387 |
+
"""
|
388 |
+
agent_diagram_psbp_system_message = diagramPSBP_template
|
389 |
+
|
390 |
+
#DiagramVisualQuestionAnswerer Prompts
|
391 |
+
diagramVQA_question = """Please describe this diagram"""
|
392 |
+
diagramVQA_template = """An image will be passed to you. It should be a flowchart or diagram. Please answer the user question."""
|
393 |
+
agent_diagram_vqa_system_message = diagramVQA_template
|
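For context, a minimal sketch of how the step-back response template above could be consumed with LangChain primitives. The `gpt-4o-mini` model name, the sample question, and the empty context strings are illustrative placeholders, not part of this commit.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from ea4all.src.shared.prompts import step_back_response_prompt_template

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # assumed model, not part of this commit

# Build the final answering prompt from the module-level template defined above.
response_prompt = ChatPromptTemplate.from_template(step_back_response_prompt_template)

chain = response_prompt | llm | StrOutputParser()
answer = chain.invoke({
    "normal_context": "",      # context retrieved for the original question
    "step_back_context": "",   # context retrieved for the step-back paraphrase
    "standalone_question": "How does EA4ALL index the application portfolio?",  # placeholder question
})
print(answer)
```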
ea4all/src/shared/state.py
ADDED
@@ -0,0 +1,84 @@
"""Shared functions for state management."""

import hashlib
import uuid
from typing import Any, Literal, Optional, Union

from langgraph.graph import MessagesState
from langchain_core.documents import Document

class State(MessagesState):
    next: Optional[str]
    user_feedback: Optional[str]

def _generate_uuid(page_content: str) -> str:
    """Generate a UUID for a document based on page content."""
    md5_hash = hashlib.md5(page_content.encode()).hexdigest()
    return str(uuid.UUID(md5_hash))

def reduce_docs(
    existing: Optional[list[Document]],
    new: Union[
        list[Document],
        list[dict[str, Any]],
        list[str],
        str,
        Literal["delete"],
    ],
) -> list[Document]:
    """Reduce and process documents based on the input type.

    This function handles various input types and converts them into a sequence of Document objects.
    It can delete existing documents, create new ones from strings or dictionaries, or return the existing documents.
    It also combines existing documents with the new ones based on the document ID.

    Args:
        existing (Optional[Sequence[Document]]): The existing docs in the state, if any.
        new (Union[Sequence[Document], Sequence[dict[str, Any]], Sequence[str], str, Literal["delete"]]):
            The new input to process. Can be a sequence of Documents, dictionaries, strings, a single string,
            or the literal "delete".
    """
    if new == "delete":
        return []

    existing_list = list(existing) if existing else []
    if isinstance(new, str):
        return existing_list + [
            Document(page_content=new, metadata={"uuid": _generate_uuid(new)})
        ]

    new_list = []
    if isinstance(new, list):
        existing_ids = set(doc.metadata.get("uuid") for doc in existing_list)
        for item in new:
            if isinstance(item, str):
                item_id = _generate_uuid(item)
                new_list.append(Document(page_content=item, metadata={"uuid": item_id}))
                existing_ids.add(item_id)

            elif isinstance(item, dict):
                metadata = item.get("metadata", {})
                item_id = metadata.get("uuid") or _generate_uuid(
                    item.get("page_content", "")
                )

                if item_id not in existing_ids:
                    new_list.append(
                        Document(**{**item, "metadata": {**metadata, "uuid": item_id}})
                    )
                    existing_ids.add(item_id)

            elif isinstance(item, Document):
                item_id = item.metadata.get("uuid", "")
                if not item_id:
                    item_id = _generate_uuid(item.page_content)
                    new_item = item.copy(deep=True)
                    new_item.metadata["uuid"] = item_id
                else:
                    new_item = item

                if item_id not in existing_ids:
                    new_list.append(new_item)
                    existing_ids.add(item_id)

    return existing_list + new_list
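`reduce_docs` is written as a LangGraph-style reducer, so it can merge a documents channel across graph steps. A minimal sketch of both uses follows; the `RetrievalState` class name and the sample strings are assumptions for illustration only.

```python
from typing import Annotated

from langchain_core.documents import Document
from langgraph.graph import MessagesState

from ea4all.src.shared.state import reduce_docs

# Hypothetical state schema: the 'documents' channel is merged by reduce_docs
# every time a node returns new documents.
class RetrievalState(MessagesState):
    documents: Annotated[list[Document], reduce_docs]

# Direct calls, outside a graph, to illustrate the merge semantics:
existing = reduce_docs(None, ["first passage"])                     # wraps the string in a Document
merged = reduce_docs(existing, ["first passage", "second passage"]) # plain strings are always appended
print(len(merged))  # 3 - dicts and Documents are deduplicated by their 'uuid' metadata, strings are not
cleared = reduce_docs(merged, "delete")                             # the literal "delete" empties the channel
print(len(cleared))  # 0
```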
ea4all/src/shared/utils.py
ADDED
@@ -0,0 +1,478 @@
"""Shared utility functions used in the project.

Functions:

"""

import os
import datetime
import getpass
import base64
import json
import re
from dotenv import load_dotenv, find_dotenv
import markdown
from markdownify import markdownify as md2text
from io import BytesIO
import pandas as pd

from pydantic import BaseModel, SecretStr

from langchain_community.vectorstores import Chroma
from langchain import hub
from langchain_core.prompts import PromptTemplate

#Model & Index & Embeddings
from langchain_openai import (
    ChatOpenAI,
)

from langchain_core.output_parsers import (
    PydanticOutputParser,
)

from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    get_buffer_string,
)

from PIL import Image

from ea4all.src.shared.prompts import (
    LLAMA31_CHAT_PROMPT_FORMAT,
)

from ea4all.src.shared.configuration import BaseConfiguration as ea4all_config

############
##INIT model
############
#initialise model
class CFG:
    # Constants
    EA4ALL_ARCHITECTURE = "ea4all_architecture.png"
    EA4ALL_OVERVIEW = "ea4all_overview.png"
    EA4ALL_ABOUT = "ea4all_overview.txt"
    EA4ALL_PODCAST = "ea4all_podcast.wav"
    APM_MOCK_QNA = "apm_qna_mock.txt"
    STREAM_SLEEP = 0.05
    REGEX_BACKTICKS = r"```(.*?)```"

    # LLMs
    #model = {"gpt-4":'gpt-4o-mini', "gpt-4o":'gpt-4o'}
    #llama = {"11": "meta-llama/llama-3.2-11B-Vision-Instruct", "90":"meta-llama/llama-3.2-90B-Vision-Instruct", "70":"meta-llama/Llama-3.1-70B-Instruct", "73":"meta-llama/Llama-3.3-70B-Instruct"}

    #hf_model="meta-llama/Llama-3.1-70B-Instruct"
    #hf_api_base="https://api-inference.huggingface.co/models/"

    #hf_max_tokens=16192
    #max_new_tokens = 4096
    #llama32_max_tokens = 4096 ##TOKEN ISSUE LLAMA-3.2 w/ ChatOpenAI not working tokens > 4096 2024-10-13

    #temperature = 0
    top_p = 0.95
    repetition_penalty = 1.15

    # splitting
    split_chunk_size = 500
    split_overlap = 0

    # embeddings
    #embeddings_model = OpenAIEmbeddings()

    # similar passages
    k = 3

    #debug
    verbose = True

    #streaming
    #streaming=True

    #VQA resized images - maximum resolution for Llama-3.2
    RESIZE_TO = 512
    MAX_WIDTH = 1024
    MAX_HEIGHT = 768

    ##Diagrams format
    diagram_format = "png"

    # paths ea4all/src/tools
    #apm_store = "/Users/avfranco/Documents/GitHub/ea4all-agentic-staging/ea4all/apm_store/"
    #apm_path = apm_store + 'APM-ea4all (test-split).xlsx'
    #dbr_demo = apm_store + "reference_architecture_dbr_demo.txt"

    #ea4all_images = "/Users/avfranco/Documents/GitHub/ea4all-agentic-staging/ea4all/images/"

    #apm_faiss = "apm_store"
    #faiss_index = 'apm_faiss_index'

###################################
##COLLECTION of re-usable functions
###################################

#return current date-time
def _get_datetime():
    now = datetime.datetime.now()
    return now.strftime("%m/%d/%Y, %H:%M:%S")

def _get_formatted_date():
    current_date = datetime.datetime.now()
    formatted_date = current_date.strftime("%d %B %Y")

    return formatted_date

#calculate difference between end and start of execution
def time_elapsed(start, end):
    time_elapsed = int(round(end - start, 0))
    time_elapsed_str = f'{time_elapsed}'

    return time_elapsed_str

def _join_paths(*paths):
    """
    Join two or more paths using os.path.join.

    Parameters:
    *paths: str
        Two or more path components to be joined.

    Returns:
    str
        The joined path.
    """
    return os.path.join(*paths)

#get user request info
def get_user_identification(request):
    if request:
        try:
            user_pip = request.headers.get('X-Forwarded-For')
            return user_pip.split(',')[0]
        except Exception:
            print(f"user info: {request}")
            return request.client.host

    return "ea4all_agent"

#Initialise model
## SETUP LLM CLIENT
def get_llm_client(model, api_base_url=None, temperature=0, streaming=False, tokens=ea4all_config.max_tokens) -> ChatOpenAI:
    """Initializes and returns a ChatOpenAI client based on the specified model and parameters."""
    client = ChatOpenAI()

    if model.startswith("gpt-"):
        client = ChatOpenAI(
            model=model,
            temperature=temperature,
            streaming=streaming,
            max_completion_tokens=tokens,
            stream_usage=True
        )
    elif "llama" in model.lower():  # Meta-llama models
        client = ChatOpenAI(
            model=model,
            api_key=SecretStr(os.environ['HUGGINGFACEHUB_API_TOKEN']),
            base_url=_join_paths(api_base_url, model, "v1/"),
            temperature=temperature,
            streaming=streaming,
            max_completion_tokens=tokens,
            stream_usage=True,
        )

    return client

#load local env variables
def load_local_env(local):
    ###read local .env file
    _ = load_dotenv(find_dotenv())
    if local not in os.environ:
        os.environ[local] = getpass.getpass(f"Provide your {local} Key")
    return os.environ[local]

#load landscape data into chroma
def load_to_chroma(documents, embeddings, path, collection_name="apm_collection"):
    #Read chromadb chroma-apm-db
    chroma_collection = Chroma(
        collection_name=collection_name,
        persist_directory=path,
        embedding_function=embeddings
    )

    if chroma_collection._collection.count():
        chroma_collection.delete_collection()
    else:
        #Add apm records
        chroma_collection = Chroma.from_documents(
            collection_name=collection_name,
            persist_directory=path,
            documents=documents,
            embedding=embeddings
        )
        chroma_collection.persist()

    return chroma_collection

##Convert gradio chat_history to langchain chat_history_format
def get_history_gradio(history, chat_history=[]):
    history_langchain_format = []
    #triggered by loaded memory runnable to replace ConversationMemoryBuffer.load_memory_variables
    #if chat_history or not history:
    #    memory = chat_history
    #triggered by loaded_memory runnable
    #else:
    history = history["chat_memory"]

    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))

    history = {"history": get_buffer_string(history_langchain_format)}

    return history

#retrieve relevant questions based on user interaction
def get_vaq_examples():
    examples = [
        {"text": "Describe this image.", "files": ["ea4all/images/multi-app-architecture.png"]},
        {"text": "Assess any risk and vulnerabilities in the current solution.", "files": ["ea4all/images/ea4all_architecture.png"]},
    ]
    return examples

# Function to encode the image
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def resize_image_1(raw_image, input_size):
    w, h = raw_image.size
    scale = input_size / max(w, h)
    new_w = int(w * scale)
    new_h = int(h * scale)
    resized_image = raw_image.resize((new_w, new_h))

    return resized_image

def resize_image_2(image, width):
    wpercent = width / float(image.size[0])
    hsize = int(float(image.size[1]) * wpercent)
    raw_image = image.resize([width, hsize])

    return raw_image

def resize_image_3(image):
    from PIL import Image
    # Get the current size
    width, height = image.size

    # Calculate the new size maintaining the aspect ratio
    if width > CFG.MAX_WIDTH or height > CFG.MAX_HEIGHT:
        ratio = min(CFG.MAX_WIDTH / width, CFG.MAX_HEIGHT / height)
        new_width = int(width * ratio)
        new_height = int(height * ratio)
    else:
        new_width, new_height = width, height

    # Resize the image
    image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    # Return new resized image
    return image

#Encode PIL.Image to base64
def encode_raw_image(raw_image):
    # Create a BytesIO buffer
    buffer = BytesIO()

    # Save the image to the buffer in PNG format
    raw_image.save(buffer, format='PNG')

    # Get the content of the buffer
    img_bytes = buffer.getvalue()

    # Encode the bytes to base64
    img_base64 = base64.b64encode(img_bytes)

    # Convert the bytes to string
    img_str = img_base64.decode('utf-8')

    return img_str

#Return a raw image ready for OpenAI GPT4-Vision
def get_raw_image(image_path):
    # Open & Resize & Encode image
    diagram = Image.open(image_path)
    w, h = diagram.size
    if w > CFG.RESIZE_TO or h > CFG.RESIZE_TO:
        resized_image = resize_image_3(diagram)
    else:
        resized_image = diagram

    #Encode diagram
    raw_image = encode_raw_image(resized_image)

    return raw_image

def load_mock_content(file_path):
    try:
        with open(_join_paths(ea4all_config.ea4all_store, file_path), "r") as file:
            content = file.read()
            return content
    except ValueError as e:
        return e

def print_json_to_md(data, indent=0, column=None):
    try:
        result = ""
        header = ""
        body = ""
        if isinstance(data, dict):
            for key, value in data.items():
                result += print_json_to_md(value, indent + 2, key)
            return result
        elif isinstance(data, list):
            if column:  # Print list items as a Markdown table
                header = ' ' * indent + f"| {' | '.join(data[0].keys())} | \n"
                header += ' ' * indent + f"| {' | '.join(['---'] * len(data[0]))} | \n"
                for item in data:
                    body += ' ' * indent + f"\n\n | {' | '.join(str(item[k]) for k in item.keys())} |"
                result += header + body
                return result
            else:
                for item in data:
                    header = ' ' * indent + f"| {' | '.join(data[0].keys())} |"
                    body += ' ' * indent + f"\n\n | {' | '.join(str(item[k]) for k in item.keys())} |"
                result += header + "\n" + body
                return result
        else:
            header += ' ' * indent + f"| {column} "
            body += f"{str(data)}\n\n"
            result += header + body
            return result

    except Exception as e:
        return f"{e} - {data}"

def markdown_to_plain_text(md):
    # Convert Markdown to HTML
    html = markdown.markdown(md)
    # Convert HTML to plain text using markdownify
    plain_text = md2text(html)
    return plain_text

def extract_structured_output(response):
    ##EXTRACT Topic from the content
    try:
        return json.loads(response)
    except ValueError:
        match = re.search(CFG.REGEX_BACKTICKS, response, re.DOTALL)

        if match:
            return json.loads(match.group(1))
        else:
            return None

def get_predicted_num_tokens(llm, content):
    return llm.get_num_tokens(content)

def get_predicted_num_tokens_from_prompt(llm, prompt, values):
    final_prompt = prompt.format(**values)
    return llm.get_num_tokens(final_prompt)

def set_max_new_tokens(predicted_tokens):
    #Return max new tokens to be generated
    return int((ea4all_config.max_tokens - predicted_tokens) * 0.95)

def escape_special_characters(input_string):
    # Use json.dumps to escape special characters
    escaped_string = json.dumps(input_string)
    # Remove the surrounding double quotes added by json.dumps
    return escaped_string[1:-1]

def clean_and_load_json(content) -> dict:
    try:
        json_data = json.loads(content)
        return json_data
    except ValueError:
        clean_string = content.replace("\n", "").replace("json", "")
        json_data = json.loads(clean_string)
        return json_data

def extract_response_from_backticks(response):
    pattern = r"```(.*?)```"
    match = re.search(pattern, str(response), re.DOTALL)

    return match.group(1) if match else response

def extract_topic_from_business_input(response) -> dict:
    ##IS JSON already
    if isinstance(response, dict):
        return response

    ##EXTRACT Topic from the content
    topic = extract_response_from_backticks(response)

    return clean_and_load_json(topic)

## LLM STRUCTURED OUTPUT Helper functions
def extract_landscape(topic):
    # Prompt
    extract_landscape_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_landscape_business_query')

    # Set up a parser: LandscapeAsIs
    parser = PydanticOutputParser(pydantic_object=topic)

    final_prompt = extract_landscape_prompt.partial(
        format_instructions=parser.get_format_instructions(),
        ai_output=LLAMA31_CHAT_PROMPT_FORMAT,
    )

    return final_prompt

def extract_principles(topic):

    # Set up a parser: LandscapeAsIs
    parser = PydanticOutputParser(pydantic_object=topic)

    #PROMPT REVISED TO WORK w/ Llama-3
    principle_template = """Identify the list of principles and its meaning from the given context.
    Do not add any superfluous information.
    Context: \n {strategic_principles} \n
    Output your answer as JSON that matches the given schema and nothing else: \n{format_instructions}\n
    """

    prompt = PromptTemplate(
        template=principle_template,
        input_variables=["strategic_principles"],
        partial_variables={
            "format_instructions": parser.get_format_instructions(),
        },
    )

    return prompt

# Task-1: Identify the business requirements, objectives, user journey, and all other relevant information
def extract_detailed_business_requirements(llm, topic: type[BaseModel], name: str, values: dict):
    parser = PydanticOutputParser(pydantic_object=topic)

    hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_extract_business_topic')
    hub_prompt = hub_prompt.partial(
        topic=name,
        format_instructions=parser.get_format_instructions(),
    )

    task_1_requirement = hub_prompt | llm | parser
    response = task_1_requirement.invoke(
        input=values,
        config={
            'tags': ['assess_business_query'],
            'run_name': name  # Custom run name
        }
    )

    return response

# Post-processing
def format_docs(docs):
    return "\n".join(doc.page_content for doc in docs)
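To show how the client factory and token helpers above fit together, here is a short usage sketch. The `gpt-4o-mini` model name and the prompt text are placeholders; the Llama model id and inference endpoint mirror the commented-out CFG values and assume `HUGGINGFACEHUB_API_TOKEN` is set in the environment.

```python
from ea4all.src.shared.utils import (
    get_llm_client,
    get_predicted_num_tokens,
    set_max_new_tokens,
)

# GPT-family client: api_base_url is not needed for OpenAI-hosted models.
llm = get_llm_client("gpt-4o-mini", temperature=0, streaming=False)

prompt_text = "Summarise the strategic principles of the EA4ALL landscape."  # placeholder
budget = set_max_new_tokens(get_predicted_num_tokens(llm, prompt_text))
print(f"Remaining generation budget: {budget} tokens")

# Llama-family client routed through an OpenAI-compatible endpoint
# (model id and base URL taken from the commented-out CFG entries above).
llama = get_llm_client(
    "meta-llama/Llama-3.3-70B-Instruct",
    api_base_url="https://api-inference.huggingface.co/models/",
    tokens=4096,
)
```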
ea4all/src/shared/vectorstore.py
ADDED
@@ -0,0 +1,196 @@
from langchain_core.runnables import RunnableConfig
from langchain.docstore.document import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStoreRetriever

import ast
import numpy as np
import pandas as pd
from contextlib import contextmanager
from typing import Generator

from ea4all.src.shared.utils import _join_paths
from ea4all.src.shared.configuration import BaseConfiguration

global _vectorstore
_vectorstore = None

def make_text_encoder(model: str) -> Embeddings:
    """Connect to the configured text encoder."""
    provider, model = model.split("/", maxsplit=1)
    match provider:
        case "openai":
            from langchain_openai import OpenAIEmbeddings

            return OpenAIEmbeddings(model=model)
        case _:
            raise ValueError(f"Unsupported embedding provider: {provider}")

@contextmanager
def make_faiss_retriever(
    configuration: BaseConfiguration, embeddings: Embeddings
) -> Generator[VectorStoreRetriever, None, None]:
    """Configure this agent to connect to a FAISS index & namespaces."""
    from langchain_community.docstore.in_memory import InMemoryDocstore
    from langchain_community.vectorstores import FAISS
    import faiss

    global _vectorstore

    if _vectorstore is None:
        try:
            _vectorstore = FAISS.load_local(
                folder_path=configuration.ea4all_store,
                embeddings=embeddings,
                index_name=configuration.apm_faiss,
                allow_dangerous_deserialization=True)

        except Exception as e:
            # Create an empty index
            index = faiss.IndexFlatL2(len(embeddings.embed_query("")))

            #Initialize an empty FAISS vectorstore
            _vectorstore = FAISS(
                embedding_function=embeddings,
                index=index,
                docstore=InMemoryDocstore(),
                index_to_docstore_id={},
            )
            #apm_docs = get_apm_excel_content(configuration)
            #_vectorstore = FAISS.from_documents(apm_docs, embeddings)
            #_vectorstore.save_local(folder_path=configuration.ea4all_store, index_name=configuration.apm_faiss,)

    search_kwargs = configuration.search_kwargs

    yield _vectorstore.as_retriever(search_type="similarity", search_kwargs=search_kwargs)

@contextmanager
def make_retriever(
    config: RunnableConfig,
) -> Generator[VectorStoreRetriever, None, None]:
    """Create a retriever for the agent, based on the current configuration."""
    configuration = BaseConfiguration.from_runnable_config(config)
    embeddings = make_text_encoder(configuration.embedding_model)
    match configuration.retriever_provider:
        case "faiss":
            with make_faiss_retriever(configuration, embeddings) as retriever:
                yield retriever

        case _:
            raise ValueError(
                "Unrecognized retriever_provider in configuration. "
                f"Expected one of: {', '.join(BaseConfiguration.__annotations__['retriever_provider'].__args__)}\n"
                f"Got: {configuration.retriever_provider}"
            )

#convert dataframe to langchain document structure, added user_ip
def panda_to_langchain_document(dataframe, user_ip):
    # create an empty list to store the documents
    apm_documents = []
    # iterate over the rows of the dataframe
    for index, row in dataframe.iterrows():
        # create a document object from the row values for all df columns
        page_content = ""
        application = ""
        capability = ""
        description = ""
        fit = ""
        roadmap = ""
        for column in dataframe.columns:
            column = ' '.join(column.split())
            page_content += f" {column}:{row[column]}"
            if 'application' in column.lower(): application = row[column]
            elif 'capabilit' in column.lower(): capability = row[column]
            elif 'desc' in column.lower(): description = row[column]
            elif 'business fit' in column.lower(): fit = row[column]
            elif 'roadmap' in column.lower(): roadmap = row[column]
        doc = Document(
            page_content=page_content,
            metadata={
                "source": application,
                "capability": capability,
                "description": description,
                "business fit": fit,
                "roadmap": roadmap,
                "row_number": index, "namespace": user_ip}
        )
        # append the document object to the list
        apm_documents.append(doc)
    return(apm_documents)

#load landscape data (excel file)
def apm_dataframe_loader(file):
    pd.set_option('display.max_colwidth', None)
    df = pd.read_excel(file)
    df = df.dropna(axis=0, how='all')
    df = df.dropna(axis=1, how='all')
    df.fillna('NaN')

    return df

##New APM Excel loader
#Removed df from return
def get_apm_excel_content(config: RunnableConfig, file=None, user_ip="ea4all_agent"):

    if file is None:
        file = _join_paths(
            getattr(config, "ea4all_store", BaseConfiguration.ea4all_store),
            getattr(config, "apm_catalogue", BaseConfiguration.apm_catalogue)
        )

    #load file into dataframe
    df = apm_dataframe_loader(file)
    #add user_id into df
    df['namespace'] = user_ip

    apm_docs = panda_to_langchain_document(df, user_ip)
    return apm_docs

def remove_user_apm_faiss(config, db, ea4all_user):
    #apm_vectorstore.docstore.__dict__["_dict"][apm_vectorstore.index_to_docstore_id[0]].metadata

    #check if user's uploaded any apm before
    byod = ea4all_user in str(db.docstore._dict.values())

    #if yes
    if byod:
        removed_ids = []
        for id, doc in db.docstore._dict.items():
            if doc.metadata['namespace'] == ea4all_user:
                removed_ids.append(id)

        ##save updated index
        if removed_ids:
            index_ids = [
                i_id
                for i_id, d_id in db.index_to_docstore_id.items()
                if d_id in removed_ids
            ]
            #Remove ids from docstore
            db.delete(ids=removed_ids)
            #Remove the corresponding embeddings from the FAISS index
            db.index.remove_ids(np.array(index_ids, dtype=np.int64))
            #Reorg embeddings
            db.index_to_docstore_id = {
                i: d_id
                for i, d_id in enumerate(db.index_to_docstore_id.values())
            }
            #save updated index
            db.save_local(folder_path=config.ea4all_store, index_name=config.apm_faiss)

#Get faiss index as a retriever
def retriever_faiss(db, user_ip="ea4all_agent"):
    ##size: len(retriever.vectorstore.index_to_docstore_id), retriever.vectorstore.index.ntotal

    #check if user's BYOData
    byod = user_ip in str(db.docstore._dict.values())

    if byod == False:
        namespace = "ea4all_agent"
    else:
        namespace = user_ip

    retriever = db.as_retriever(search_type="similarity",
        search_kwargs={'k': 50, 'score_threshold': 0.8, 'filter': {'namespace': namespace}})

    return retriever
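A minimal sketch of how a node could obtain a retriever through `make_retriever`. The field names `embedding_model` and `retriever_provider` come from the code above; the embedding model value and the query are illustrative assumptions, and the defaults in `BaseConfiguration` would apply if the configurable keys are omitted.

```python
from langchain_core.runnables import RunnableConfig

from ea4all.src.shared.vectorstore import make_retriever

# Assumed configurable values; defaults come from BaseConfiguration when omitted.
config: RunnableConfig = {
    "configurable": {
        "embedding_model": "openai/text-embedding-3-small",
        "retriever_provider": "faiss",
    }
}

with make_retriever(config) as retriever:
    docs = retriever.invoke("Which applications support the payments capability?")
    for doc in docs:
        print(doc.metadata.get("source"), "-", doc.page_content[:80])
```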
ea4all/src/tools/tools.py
ADDED
@@ -0,0 +1,105 @@
from typing import Literal, Annotated
from typing_extensions import TypedDict
import json
import tempfile

from langchain_core.runnables import RunnableConfig

from langgraph.graph import END
from langgraph.types import Command
from langgraph.prebuilt import InjectedState

from langchain_community.utilities import BingSearchAPIWrapper
from langchain_community.tools.bing_search.tool import BingSearchResults
from langchain_community.document_loaders import JSONLoader

from langchain.agents import tool

from ea4all.src.shared.configuration import (
    BaseConfiguration
)

from ea4all.src.shared.state import (
    State
)

from ea4all.src.shared.utils import (
    get_llm_client,
    format_docs,
)

def make_supervisor_node(config: RunnableConfig, members: list[str]) -> str:
    options = ["FINISH"] + members
    system_prompt = (
        "You are a supervisor tasked with managing a conversation between the"
        f" following workers: {members}. Given the following user request,"
        " respond with the worker to act next. Each worker will perform a"
        " task and respond with their results and status. When finished,"
        " respond with FINISH."
    )

    configuration = BaseConfiguration.from_runnable_config(config)
    model = get_llm_client(
        configuration.supervisor_model,
        api_base_url="",
    )

    class Router(TypedDict):
        """Worker to route to next. If no workers needed, route to FINISH."""

        next: Literal[*options]

    def supervisor_node(state: State) -> Command[Literal[*members, "__end__"]]:
        """An LLM-based router."""
        messages = [
            {"role": "system", "content": system_prompt},
        ] + state["messages"]
        response = model.with_structured_output(Router).invoke(messages)
        goto = response["next"]
        if goto == "FINISH":
            goto = END

        return Command(goto=goto, update={"next": goto})

    return supervisor_node

async def websearch(state: State):
    """
    Web search based on the re-phrased question.

    Args:
        state (dict): The current graph state
        config (RunnableConfig): Configuration with the model used for query analysis.

    Returns:
        state (dict): Updates documents key with appended web results
    """

    ##API Wrapper
    search = BingSearchAPIWrapper()

    question = state.get('messages')[-1].content

    ##Bing Search Results
    web_results = BingSearchResults(
        k=5,
        api_wrapper=search,
        handle_tool_error=True,
    )

    result = await web_results.ainvoke({"query": question})

    fixed_string = result.replace("'", "\"")
    result_json = json.loads(fixed_string)

    # Create a temporary file
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
        # Write the JSON data to the temporary file
        json.dump(result_json, temp_file)
        temp_file.flush()

        # Load the JSON data from the temporary file
        loader = JSONLoader(file_path=temp_file.name, jq_schema=".[]", text_content=False)
        docs = loader.load()

    return {"messages": {"role": "assistant", "content": format_docs(docs)}}
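For orientation, a minimal sketch of how `make_supervisor_node` and `websearch` could be wired into a LangGraph workflow. The `build_graph` helper, the single `websearch` worker, and the edge layout are assumptions for illustration; the supervisor routes via the `Command` it returns, so it needs no explicit outgoing edges.

```python
from langgraph.graph import StateGraph, START

from ea4all.src.shared.state import State
from ea4all.src.tools.tools import make_supervisor_node, websearch

def build_graph(config):
    # 'config' would normally be the RunnableConfig supplied at runtime.
    supervisor = make_supervisor_node(config, members=["websearch"])

    workflow = StateGraph(State)
    workflow.add_node("supervisor", supervisor)
    workflow.add_node("websearch", websearch)
    workflow.add_edge(START, "supervisor")
    # Hand control back to the supervisor after the worker finishes,
    # so it can route to another worker or FINISH.
    workflow.add_edge("websearch", "supervisor")
    return workflow.compile()
```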