File size: 17,382 Bytes
7042c3c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 |
import os
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate
)
from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.prompts import MessagesPlaceholder, format_document
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain_core.messages import (
HumanMessage,
)
from langchain_core.output_parsers import (
JsonOutputParser
)
from langsmith import (
traceable,
)
################################
##COLLECTION of prompt functions
################################
##Llama-3.1 Prompt Format
# Define the prompt format with special tokens
# Llama-3.1 prompt scaffolding using the model's special tokens.
# Placeholders: {system_message}, {human_message}, {ai_message}.
LLAMA31_CHAT_PROMPT_FORMAT = (
    "<|begin_of_text|>"
    "<|start_header_id|>system<|end_header_id|>{system_message}<|eot_id|>\n"
    "<|start_header_id|>user<|end_header_id|>{human_message}<|eot_id|>\n"
    "<|start_header_id|>ai<|end_header_id|>{ai_message}\n"
)
# The non-chat variant is textually identical today, so alias it instead of
# repeating the literal (strings are immutable; value is the same).
LLAMA31_PROMPT_FORMAT = LLAMA31_CHAT_PROMPT_FORMAT
##return a prompt-template class with informed user inquiry
def ea4all_prompt(query):
    """Return TEMPLATE_QUERY_ANSWER rendered with the user's inquiry.

    The answer slot is left empty so the model completes it.
    """
    # BUG FIX: TEMPLATE_QUERY_ANSWER's placeholders are {user_question} and
    # {answer}; the original declared/passed `query`, which is not a template
    # variable and raised a KeyError at format time.
    prompt_template = PromptTemplate(
        input_variables=["user_question", "answer"],
        template=TEMPLATE_QUERY_ANSWER)
    prompt = prompt_template.format(
        user_question=query,
        answer="")
    return prompt
##return a chat-prompt-template class from the informed template
def ea4all_chat_prompt(template):
    """Build a ChatPromptTemplate from a system-role `template` plus a
    human message carrying the {user_question} placeholder.

    A JsonOutputParser is attached as the prompt's output parser.
    """
    message_list = [
        SystemMessagePromptTemplate.from_template(template),
        ## MessagesPlaceholder(variable_name="history"),  # history slot kept disabled, as in the original
        HumanMessagePromptTemplate.from_template("{user_question}"),
    ]
    chat_prompt = ChatPromptTemplate.from_messages(messages=message_list)
    chat_prompt.output_parser = JsonOutputParser()
    return chat_prompt
##select best prompt based on user inquiry's category
@traceable(
    tags={os.environ["EA4ALL_ENV"]}
)
def ea4ll_prompt_selector(category):
    """Select the best chat prompt for the inquiry `category`.

    "Strategy", "Application" and "Recommendation" map to their dedicated
    templates; any other value falls back to GENERAL_TEMPLATE via the
    selector's default prompt (the original's explicit 4th conditional
    duplicated that default and has been dropped).
    """
    # NOTE(review): ConditionalPromptSelector.get_prompt's parameter is
    # documented as an LLM; passing the category string works only because
    # each condition treats its argument as the category — confirm intended.
    selector = ConditionalPromptSelector(
        default_prompt=ea4all_chat_prompt(GENERAL_TEMPLATE),
        conditionals=[
            (lambda c: c == "Strategy", ea4all_chat_prompt(STRATEGY_TEMPLATE)),
            (lambda c: c == "Application", ea4all_chat_prompt(APPLICATION_TEMPLATE)),
            (lambda c: c == "Recommendation", ea4all_chat_prompt(RECOMMENDATION_TEMPLATE)),
        ],
    )
    return selector.get_prompt(category)
#######################
##COLLECTION of prompts
#######################
##Template-basic instruction + context
# Basic RAG answering template: answer strictly from the supplied context
# ({cdocs}) and admit when the information is not found.
# (Fixed prompt typo: "trible" -> "triple".)
TEMPLATE_CONTEXT = """You are a helpful Enterprise Architect with knowledge on enterprises IT landscapes.
Use only the context delimited by triple backticks to answer questions. Return the answer formatted as a text paragraph.
If you don't know the answer return I could not find the information.
Don't make up the response.
Context: ```{cdocs}```
Help answer: ""
"""
##Template-basic instruction + question + answer
# Question/answer template used by ea4all_prompt.
# Placeholders: {user_question}, {answer}.
# (Fixed prompt typo: "knowledgable" -> "knowledgeable".)
TEMPLATE_QUERY_ANSWER = """You are Enterprise Architect highly knowledgeable on IT landscape. \
Answer the question that is delimited by triple backticks into a style that is bullet list. \
If the question cannot be answered using the information provided answer with "I don't know". \
Always say "thanks for asking!" at the end of the answer.
Question: ```{user_question}```
Answer: {answer}
"""
TEMPLATE_APM_QNA_ROUTING = """application portfolio assessment, application/IT landscape rationalisation, simplification or optimisation, business capability assessment, line of business landscape, who can I talk to, assistance from architecture team."""
##Template-break-into-simpler-tasks
#https://platform.openai.com/docs/guides/prompt-engineering/strategy-split-complex-tasks-into-simpler-subtasks
# System-role header shared by every category prompt (see the *_TEMPLATE
# constants below). Built as one implicit-concatenation literal instead of
# the original chain of += statements; the resulting string is identical.
TEMPLATE_HEADER = (
    "You are a helpful enterprise architect assistant. "
    "Your goal is to provide accurate and detailed responses to user inquiry. "
    "You have access to a vast amount of enterprise architecture knowledge, "
    "and you can understand and generate language fluently. "
    "You can assist with a wide range of architectural topics, including but not limited to "
    "business, application, data and technology architectures. "
    "You should always strive to promote a positive and respectful conversation.\n"
)
# Inquiry-classification instructions: classify the user inquiry into a
# primary and secondary category and emit JSON with keys
# primary/secondary/question. Built as one implicit-concatenation literal
# instead of the original chain of += statements; the value is identical.
TEMPLATE_TASKS = (
    "You will be provided with a user inquiry. "
    "Classify the inquiry into primary category and secondary category. "
    "Primary categories: Strategy, Application, Recommendation or General Inquiry. "
    "Strategy secondary categories:\n"
    "- Architecture and Technology Strategy\n"
    "- Vision\n"
    "- Architecture Principles\n"
    "Application secondary categories:\n"
    "- Meet business and technical need\n"
    "- Business criticality\n"
    "- Roadmap\n"
    "- Business Capability\n"
    "- Hosting\n"
    "Recommendation secondary categories:\n"
    "- Application rationalisation\n"
    "- Landscape simplification\n"
    "- Reuse existent invested application\n"
    "- Business capability with overlapping applications\n"
    "- Opportunities and innovation\n"
    "General inquiry:\n"
    "- Speak to an architect\n"
    "You may also revise the original inquiry if you think that revising "
    "it will ultimately lead to a better response from the language model "
    "Provide your output in JSON format with the keys: primary, secondary, question.\n"
)
#Template-break-into-specific-prompt-by-category
# Category-specific instruction block for Strategy inquiries; combined with
# TEMPLATE_HEADER and default_template into STRATEGY_TEMPLATE below.
strategy_template = """You will be provided with inquiry about architecture strategy.
Follow these steps to answer user inquiry:
STEP 1 - Using only the context delimited by triple backticks.
STEP 2 - Look at application with roadmap to invest.
STEP 3 - Extract the information that is only relevant to help answer the user inquiry
"""
# Category-specific instruction block for Application inquiries; combined
# with TEMPLATE_HEADER and default_template into APPLICATION_TEMPLATE below.
application_template = """You will be provided with an inquiry about application architecture.
Follow these steps to answer user inquiry:
STEP 1 - Using only the context delimited by triple backticks.
STEP 2 - Extract the information that is only relevant to help answer the user inquiry
"""
# Category-specific instruction block for Recommendation inquiries; combined
# with TEMPLATE_HEADER and default_template into RECOMMENDATION_TEMPLATE.
# (Fixed prompt typos: "diffent" -> "different", "applicatins" -> "applications".)
recommendation_template = """You will be provided with enterprise architecture inquiry that needs a recommendation.
Follow these steps to answer user inquiry:
STEP 1 - Use only the context delimited by triple backticks.
STEP 2 - Look at applications with low business or technical fit
STEP 3 - Look at application with roadmap different to invest
STEP 4 - Look at applications hosted on premise
STEP 5 - Look at Business capability with overlapping applications
"""
# Fallback instruction block for general IT-landscape inquiries; combined
# with TEMPLATE_HEADER and default_template into GENERAL_TEMPLATE below.
# (Fixed prompt grammar: "You will provided" -> "You will be provided".)
general_template = """You will be provided with a general inquiry about enterprise architecture IT landscape.
Follow these steps to answer user queries:
STEP 1 - use only the context delimited by triple backticks
STEP 2 - Extract the information that is only relevant to help answer the user inquiry
"""
# Common final instructions appended to every category template: forbid
# guessing, allow one follow-up question, and inject the {user_question}
# and {cdocs} placeholders.
# NOTE(review): "top-like string" below is unclear wording in the prompt —
# possibly meant "chat-friendly" — confirm intent before rewording.
default_template = """
FINAL STEP - Do not make up or guess ANY extra information. \
Ask follow-up question to the user if you need further clarification to understand and answer their inquiry. \
After a follow-up question if you still don't know the answer or don't find specific information needed to answer the user inquiry \
return I could not find the information. \
Ensure that the response contain all relevant context needed to interpret them -
in other words don't extract small snippets that are missing important context.
Format the output as top-like string formatted with the most appropriate style to make it clear, concise and user-friendly for a chatbot response.
Here is the question: {user_question}
Here is the context: ```{cdocs}```
"""
# Fully assembled category prompts: shared system header + category-specific
# steps + common final instructions ({user_question}/{cdocs} placeholders).
STRATEGY_TEMPLATE = TEMPLATE_HEADER + strategy_template + default_template
APPLICATION_TEMPLATE = TEMPLATE_HEADER + application_template + default_template
RECOMMENDATION_TEMPLATE = TEMPLATE_HEADER + recommendation_template + default_template
GENERAL_TEMPLATE = TEMPLATE_HEADER + general_template + default_template
###############################################
##COLLECTION of prompts for conversation memory
###############################################
# Conversation-memory prompt: rewrite a follow-up question into a standalone
# question using the chat history. Placeholders: {chat_history}, {user_question}.
_template = """Given the following conversation and a follow up question,\
rephrase the follow up question to be a standalone question, in its original language.\
Chat History:
{chat_history}
Follow Up Input: {user_question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
# Default rendering for a single retrieved document: just its page content.
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def _combine_documents(
    docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
    """Render each document with `document_prompt` and join the results
    with `document_separator` into a single context string."""
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )
##################################################
##COLLECTION of prompts - RAG query transformation
##################################################
## Multi Query
# Prompt
# Multi-query RAG transformation: ask the model for five rewrites of
# {standalone_question} to broaden vector-database retrieval.
multiquery_template = """You are an AI Enterprise Architect language model assistant. Your task is to generate five
different versions of the given user question to retrieve relevant documents from a vector
database. By generating multiple perspectives on the user question, your goal is to help
the user overcome some of the limitations of the distance-based similarity search.
Provide these alternative questions separated by newlines. Original question: {standalone_question}"""
# Query-decomposition prompt: break {user_question} into three sub-questions
# that can be retrieved and answered independently.
# (Fixed prompt grammar: "can be answers in isolation" -> "can be answered in isolation".)
decomposition_template = """You are a helpful enterprise architect assistant that generates multiple sub-questions related to an input question. \n
The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
Generate multiple search queries related to: {user_question} \n
Output (3 queries):"""
# Recursive-answering prompt: answer {question} given prior question+answer
# pairs ({q_a_pairs}) and retrieved {context}.
# NOTE(review): "recursevely" is a typo for "recursively", but the name is
# kept as-is because external callers may reference it.
decomposition_answer_recursevely_template = """
Here is the question you need to answer:
\n --- \n {question} \n --- \n
Here is any available background question + answer pairs:
\n --- \n {q_a_pairs} \n --- \n
Here is additional context relevant to the question:
\n --- \n {context} \n --- \n
Use the above context and any background question + answer pairs to answer the question: \n {user_question}
"""
# RAG-Fusion transformation: generate four search queries for
# {standalone_question}; results are typically fused via reciprocal rank.
rag_fusion_questions_template = """You are a helpful enterprise architect assistant that generates multiple search queries based on a single input query. \n
Generate multiple search queries related to: {standalone_question} \n
Output (4 queries):"""
# Few Shot Examples
# Few-shot pairs demonstrating "step-back" question rewriting: each concrete
# question is paired with a more generic version that is easier to answer.
few_shot_step_back_examples = [
    {"input": question, "output": step_back}
    for question, step_back in [
        (
            "Could the members of The Police perform lawful arrests?",
            "what can the members of The Police do?",
        ),
        (
            "Jan Sindel was born in what country?",
            "what is Jan Sindel personal history?",
        ),
    ]
]
# We now transform these to example messages
# Render each step-back example as a human/ai message pair.
few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
    [
        ("human", "{input}"),
        ("ai", "{output}"),
    ]
)
# Few-shot block that expands the examples above when embedded in a larger
# chat prompt (see few_shot_step_back_prompt below).
few_shot_prompt = FewShotChatMessagePromptTemplate(
    input_variables=["standalone_question"],
    example_prompt=few_shot_step_back_examples_prompt,
    examples=few_shot_step_back_examples,
)
# Full step-back prompt: system instruction + few-shot examples + the user's
# {standalone_question} to be paraphrased into a more generic question.
few_shot_step_back_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are an expert at enterprise architecture world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
        ),
        # Few shot examples
        few_shot_prompt,
        # New question
        ("user", "{standalone_question}"),
    ]
)
# Response prompt
# Step-back response prompt: answer using both the normal retrieved context
# and the step-back (more generic) context when they are relevant.
# Placeholders: {normal_context}, {step_back_context}, {standalone_question}.
step_back_response_prompt_template = """You are an expert of enterprise architecture world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
# {normal_context}
# {step_back_context}
# Original Question: {standalone_question}
"""
# HyDE document generation
# HyDE (Hypothetical Document Embeddings): generate a hypothetical passage
# answering {standalone_question}; the passage is embedded for retrieval.
hyde_template = """Please write an architecture scientific passage to answer the question
Question: {standalone_question}
Passage:"""
##################################################
##COLLECTION of prompts - Agentic Workflows
##################################################
#Agent system prompt
#System prompt embedded into human prompt
# Agent human message carrying the system instructions inline as text parts.
# NOTE(review): "{user_question}", "{system_message}" and "{tool_names}" are
# literal braces inside a HumanMessage, not template variables — presumably
# substituted upstream before sending; verify against the caller.
awqa_human_message = HumanMessage(content=[
    {"type": "text", "text": "{user_question}"},
    {"type": "text", "text": "You are a helpful AI assistant, collaborating with other assistants."},
    {"type": "text", "text": "{system_message}"},
    {"type": "text", "text": " Use the provided tools to progress towards answering the question."},
    {"type": "text", "text": " You have access to the following tools: {tool_names}."},
])
# Agentic-workflow chat prompt: collaboration instructions as a human turn
# ({system_message}, {user_question}, {tool_names} variables) followed by the
# running conversation via the "messages" placeholder.
awqa_template = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            "You are a helpful AI assistant, collaborating with other assistants."
            "{system_message}"
            " Use the provided tools to progress towards answering the question: {user_question}"
            " You have access to the following tools: {tool_names}."
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)
#DiagramV2T
# Diagram-to-text: assess how the pictured solution meets quality standards
# and architecture best practices.
diagramV2T_question = "How this architecture solution meets quality standards and alignment with architectural best practices?"
diagramV2T_template = """An image will be passed to you. Please explain how it meets quality standards and alignment with architecture best practices."""
agent_diagram_v2t_system_message = diagramV2T_template
#DiagramType
# Classify the type of architecture diagram shown in an image.
# (Fixed garbled prompt sentences: "Just return a that is was not possible"
# -> "Just return that it was not possible", and "Only extract what exactly
# diagram type is the images." -> "Only extract the exact diagram type of the image.")
diagram_type_question = "What is this diagram type? Is a flowchart, C4, sequence-diagram, data flow or any other?"
diagramType_template = """An image will be passed to you. Identify the type of architecture diagram this image is.
For example, flowchart, C4, sequence flow, data flow, or other.
If a type of diagram is not identified that's fine! Just return that it was not possible to identify the architectural diagram style in this image.
Do not make up or guess ANY extra information. Only extract the exact diagram type of the image.
"""
agent_diagram_type_system_message = diagramType_template
#DiagramComponents
# Extract all architecture components visible in a diagram image.
# (Fixed prompt typo: "middeware" -> "middleware".)
diagram_component_question = "Please list all components that are part of this current solution architecture"
diagramComponent_template = """An image will be passed to you. Extract from it all components identified in this image.
For example, application, software, connector, relationship, user, name, microservice, middleware, container or other.
If no components are identified that's fine - you don't need to extract any! Just return an empty list.
Do not make up or guess ANY extra information. Only extract what exactly is in the images.
"""
agent_diagram_components_system_message = diagramComponent_template
#DiagramRiskVulnerabilityMitigation
# Identify risks/vulnerabilities in the pictured solution and suggest
# mitigations; the example sets the expected risk/description/mitigation shape.
diagram_risk_question = "What are the potential risks and vulnerabilities in this current solution architecture, and how can we mitigate them?"
diagramRVM_template = """An image will be passed to you. Extract from it potential risks and vulnerabilities along with mitigation strategy in current solution architecture.
For example, risk: SQL injection, description: application A connected to MySQL database, mitigation: Use prepared
statements and parameterised queries to handle user input. Also, implement input validation and sanitisation to prevent malicious input from being processed.
If no risks, vulnerabilities or mitigation strategy are identified that's fine - you don't need to extract any! Just return an empty list.
Do not make up or guess ANY extra information. Only extract what exactly is in the image.
"""
agent_diagram_rvm_system_message = diagramRVM_template
#DiagramPatternsStandardsBestPractices
# List well-architected patterns, standards and best practices applicable to
# the pictured solution architecture.
diagram_pattern_question = "Please describe well-architected patterns, standards and best practices that can be applied to the current solution architecture."
diagramPSBP_template = """An image will be passed to you.
List well-architected standards, patterns or best-practices that can be applied to the current solution architecture.
"""
agent_diagram_psbp_system_message = diagramPSBP_template
#DiagramVisualQuestionAnswerer Prompts
# Generic visual question answering over a flowchart/diagram image.
diagramVQA_question = """Please describe this diagram"""
diagramVQA_template = """An image will be passed to you. It should be a flowchart or diagram. Please answer the user question."""
agent_diagram_vqa_system_message = diagramVQA_template
|