# Core libraries
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import (
    AIMessage
)

from langgraph.graph import (
    END, 
    StateGraph,
)

from ea4all.src.ea4all_gra.configuration import AgentConfiguration
from ea4all.src.ea4all_gra.data import (
    ListRequirement,
    ListObjective,
    BusinessCapability,
    StakeholderList,
    UserJourney,
)
from ea4all.src.shared.utils import (
    get_llm_client,
    extract_detailed_business_requirements,
)
from ea4all.src.shared.prompts import LLAMA31_PROMPT_FORMAT

from ea4all.src.ea4all_gra.togaf_task1.state import Task1State

# EXECUTE STEP-1: Agent that identifies business requirements, objectives, capabilities, stakeholders and user journey
def assess_business_query(state: Task1State, config: RunnableConfig):
    """Identified business requirements, goals, use cases, user journey, stakeholder and business capability from a given business query."""
    #DEFINE agent template & prompt
    #BROKE-DOWN BusinessInput into individual extractions: LLAMA-3 CONTEXT WINDOW limitation
    #REMOVED parser from the chain: LLAMA-3 returning text + ```BusinessInput```
    ##Parser back to chain 2024-10-13
    #Setting streaming=True makes the model produces wrong output
    query = state.get("business_query")

    # Get any user-provided configs - LLM model in use
    configuration = AgentConfiguration.from_runnable_config(config)
    model = get_llm_client(configuration.togaf_model, configuration.api_base_url)

    values = {"business_input": query}
    final_response=[]

    ##EXTRACT BUSINESS REQUIREMENT
    response = extract_detailed_business_requirements(model, ListRequirement, "business requirement", values)
    business_reqs = ""
    try:
        for item in response.requirements:
            business_reqs += ':'.join([item.category, item.requirement.lower()]) + ";"
        final_response += [response.requirements]
    except Exception as e:
        print(f"Houston, we have a problem: {e}")

    ##EXTRACT BUSINESS OBJECTIVE
    response = extract_detailed_business_requirements(model, ListObjective, "business objective", values)
    business_goal=[]
    try:
        for item in response.objectives:
            business_goal.append(item.objective)
        final_response += [response.objectives]
    except Exception as e:
        print(f"Houston, we have a problem: {e}")


    ##EXTRACT BUSINESS CAPABILITY
    response = extract_detailed_business_requirements(model, BusinessCapability, "business capabilities", values)
    business_capabilities=[]
    try:
        for item in response.capabilities:
            business_capabilities.append(item.capability)
        final_response += [response.capabilities]
    except Exception as e:
        print(f"Houston, we have a problem: {e}")


    ##EXTRACT STAKEHOLDER
    response = extract_detailed_business_requirements(model, StakeholderList, "business stakeholder", values)
    business_stakeholder = ""
    try:
        for item in response.stakeholders:
            business_stakeholder += ' '.join([item.stakeholder,item.role.lower(), item.concern]) + "."
        final_response += [response.stakeholders]
    except Exception as e:
        print(f"Houston, we have a problem: {e}")


    ##EXTRACT BUSINESS USER JOURNEY
    response = extract_detailed_business_requirements(model, UserJourney, "user journey", values)
    user_journey = ""
    try:
        for item in response.userjourney:
            user_journey += ':'.join([item.persona,item.step.lower()]) + ","
        final_response += [response.userjourney]
    except Exception as e:
        print(f"Houston, we have a problem: {e}")

    name = state.get("next")

    return {
        "messages": [AIMessage(content=str(final_response), name=name)],
        "requirement": business_reqs,
        "intent": business_goal,
        "stakeholder": business_stakeholder,
        "userjourney": user_journey,
        "biz_capability": str(business_capabilities)
    }
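
# Illustrative sketch (not part of the original module): extract_detailed_business_requirements
# is assumed to prompt the model for a structured object matching the given Pydantic schema.
# The helper name _extract_structured_sketch and its body are hypothetical; the real
# implementation lives in ea4all.src.shared.utils.
def _extract_structured_sketch(model, schema, topic: str, values: dict):
    """Hypothetical stand-in for extract_detailed_business_requirements, for reference only."""
    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages([
        ("system", f"Extract every {topic} from the user's business query."),
        ("human", "{business_input}"),
    ])
    # with_structured_output binds the schema so the model returns a parsed instance of it
    chain = prompt | model.with_structured_output(schema)
    return chain.invoke(values)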

# Build the TASK-1 graph
task1_builder = StateGraph(Task1State)

# Define the nodes
task1_builder.add_node("AssessBusinessQuery", assess_business_query)  # assess business input

# Build graph
task1_builder.add_edge("AssessBusinessQuery", END)
task1_builder.set_entry_point("AssessBusinessQuery")

# Set FINISH node end-point (equivalent to the explicit edge to END above)
task1_builder.set_finish_point("AssessBusinessQuery")

# Compile
task1_graph = task1_builder.compile()
task1_graph.name = "togaf_identify_business_requirements"
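
# Example usage (illustrative only): invoke the compiled graph directly.
# Assumes a configured LLM endpoint is available and that Task1State accepts
# these keys; the sample query and the "next" value are hypothetical.
if __name__ == "__main__":
    example_state = {
        "business_query": "We need a self-service portal so customers can track and amend their orders.",
        "next": "AssessBusinessQuery",
    }
    result = task1_graph.invoke(example_state)
    print(result["requirement"])
    print(result["biz_capability"])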