GerlandoRex committed on
Commit
ff3c585
·
1 Parent(s): a58f82e

add: simple gradio app with langgraph

Browse files
Files changed (1) hide show
  1. app.py +271 -0
app.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import os
3
+ import uuid
4
+ import asyncio
5
+ import gradio as gr
6
+
7
+ from langchain_mcp_adapters.client import MultiServerMCPClient
8
+ from langchain_openai import ChatOpenAI
9
+ from langgraph.prebuilt import ToolNode
10
+ from langgraph.graph import MessagesState, END, StateGraph
11
+ from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
12
+ from langgraph.checkpoint.memory import MemorySaver
13
+
14
# System prompt injected once at the start of each new conversation thread.
# It requires the model to get explicit user confirmation before any
# board-modifying tool call (fixed: "if wants" -> "if they want").
SYSTEM_PROMPT = """
You are an assistant that can manage Trello boards and projects.
You will be given a set of tools to work with. Each time you decide to use a tool that modifies in any way a Trello board, you MUST ask the user if they want to proceed.
If the user's answer is negative, then you have to abort everything and end the conversation.
"""
19
+
20
+
21
class LangGraphAgent:
    """Trello assistant driven by a LangGraph agent loop (LLM <-> MCP tools).

    Per-conversation history is kept in a ``MemorySaver`` checkpointer keyed
    by a random ``thread_id``; ``reset_thread`` switches to a fresh thread.
    ``setup()`` must be awaited once before ``generate_responses_for_turn``.
    """

    def __init__(self):
        # Compiled LangGraph application; populated by setup().
        self.agent_app = None
        # Conversation identity for the checkpointer: a fresh random thread id.
        self.config = {"configurable": {"thread_id": f"{str(uuid.uuid4())}"}}
        self.memory = MemorySaver()

    def reset_thread(self):
        """Start a new conversation by switching to a fresh thread_id.

        The old history remains in the checkpointer but is no longer
        reachable through this agent's config.
        """
        self.config = {"configurable": {"thread_id": f"{str(uuid.uuid4())}"}}
        print(
            f"Chat thread reset. New Thread ID: {self.config['configurable']['thread_id']}"
        )

    async def setup(self):
        """Connect to the Trello MCP server and build/compile the graph.

        Raises:
            ValueError: If the NEBIUS_API_KEY environment variable is unset.
        """
        print("Setting up LangGraphAgent...")
        mcp_client = MultiServerMCPClient(
            {
                "trello": {
                    "url": "http://localhost:8000/sse",
                    "transport": "sse",
                }
            }
        )

        tools = await mcp_client.get_tools()
        tool_node = ToolNode(tools)

        # Fail fast with a clear message instead of a cryptic auth error later.
        api_key = os.getenv("NEBIUS_API_KEY")
        if not api_key:
            raise ValueError("NEBIUS_API_KEY environment variable not set.")

        llm_with_tools = ChatOpenAI(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct",
            temperature=0.0,
            api_key=api_key,
            base_url="https://api.studio.nebius.com/v1/",
        )
        llm_with_tools = llm_with_tools.bind_tools(tools)

        async def call_llm(state: MessagesState):
            # Async invoke so the serving event loop is never blocked.
            response = await llm_with_tools.ainvoke(state["messages"])
            return {"messages": [response]}

        graph = StateGraph(MessagesState)
        # The original wrapped call_llm in functools.partial() with no bound
        # arguments, a no-op; pass the coroutine function directly.
        graph.add_node("llm", call_llm)
        graph.add_node("tool", tool_node)
        graph.set_entry_point("llm")

        def should_continue(state: MessagesState):
            # Route to the tool node while the LLM keeps requesting tool
            # calls; otherwise the turn is finished.
            last_message = state["messages"][-1]
            return (
                "tool"
                if hasattr(last_message, "tool_calls") and last_message.tool_calls
                else END
            )

        graph.add_conditional_edges("llm", should_continue, {"tool": "tool", END: END})
        graph.add_edge("tool", "llm")

        self.agent_app = graph.compile(checkpointer=self.memory)
        print("LangGraphAgent setup complete.")

    async def generate_responses_for_turn(
        self, user_message_text: str, is_first_turn_in_ui: bool
    ) -> list[str]:
        """Run one conversational turn and return the bot utterances.

        Args:
            user_message_text: Raw text the user typed.
            is_first_turn_in_ui: Whether the UI chat history was empty. Kept
                for interface compatibility; the checkpointer state, not this
                flag, decides whether the system prompt is injected.

        Returns:
            Assistant messages (plain text plus tool-call notices) produced
            during this turn, with consecutive duplicates removed.
        """
        langgraph_input_messages = []

        # Inject the system prompt only at the start of a brand-new thread;
        # MemorySaver replays earlier messages for existing threads.
        # NOTE(review): MemorySaver.aget returns a Checkpoint whose saved
        # state lives under "channel_values"; the original read a
        # non-existent "values" key, so the prompt was re-added on every
        # turn. TODO confirm against the installed langgraph version.
        thread_state = await self.memory.aget(self.config)
        existing_messages = ((thread_state or {}).get("channel_values") or {}).get(
            "messages"
        )
        is_new_thread_conversation = not existing_messages

        if is_new_thread_conversation:
            print("Adding System Prompt for new conversation thread.")
            langgraph_input_messages.append(
                SystemMessage(content=SYSTEM_PROMPT.strip())
            )

        langgraph_input_messages.append(HumanMessage(content=user_message_text))

        bot_responses_this_turn = []
        # With stream_mode="values" the full state is emitted after every
        # graph step, so the same AIMessage can reappear; track ids to avoid
        # emitting a message twice.
        processed_message_ids_in_stream = set()

        async for result in self.agent_app.astream(
            {"messages": langgraph_input_messages},
            config=self.config,
            stream_mode="values",
        ):
            if not result or "messages" not in result or not result["messages"]:
                continue

            latest = result["messages"][-1]
            if (
                not isinstance(latest, AIMessage)
                or latest.id in processed_message_ids_in_stream
            ):
                continue

            new_pieces = []

            # 1. Plain assistant text, skipping an immediate repeat.
            if latest.content and (
                not bot_responses_this_turn
                or bot_responses_this_turn[-1] != latest.content
            ):
                new_pieces.append(str(latest.content))

            # 2. Surface requested tool calls so the user can approve/deny.
            if hasattr(latest, "tool_calls") and latest.tool_calls:
                for tool_call in latest.tool_calls:
                    new_pieces.append(
                        f"**Tool Call:** `{tool_call['name']}`\n*Arguments:* `{tool_call['args']}`"
                    )

            if new_pieces:
                bot_responses_this_turn.extend(new_pieces)
                # Mark this AIMessage id as processed only once it produced output.
                processed_message_ids_in_stream.add(latest.id)

        # Deduplicate consecutive identical messages from streaming nuances.
        final_bot_responses = []
        for utterance in bot_responses_this_turn:
            if not final_bot_responses or final_bot_responses[-1] != utterance:
                final_bot_responses.append(utterance)

        return final_bot_responses
176
+
177
+
178
# Single shared agent instance for the whole Gradio app.
agent = LangGraphAgent()

# Soft blue/sky palette; base theme first, then the customized settings.
_soft_base = gr.themes.Soft(primary_hue="blue", secondary_hue="sky", neutral_hue="slate")
theme = _soft_base.set(
    body_background_fill="linear-gradient(to right, #f0f4f8, #e6e9f0)",  # Light gradient background
    block_background_fill="white",
    block_border_width="1px",
    block_shadow="*shadow_drop_lg",
    button_primary_background_fill="*primary_500",
    button_primary_text_color="white",
    button_secondary_background_fill="*secondary_500",
    button_secondary_text_color="white",
)
193
+
194
with gr.Blocks(theme=theme, title="Trello AI Assistant") as demo:
    gr.Markdown(
        """
        # Trello AI Assistant
        Manage your Trello boards and projects with AI assistance.
        """
    )

    chatbot = gr.Chatbot(
        label="Conversation",
        bubble_full_width=False,
        height=600,
        avatar_images=(
            None,
            "https://upload.wikimedia.org/wikipedia/commons/thumb/7/72/Brandon_Sanderson_in_2018.jpg/800px-Brandon_Sanderson_in_2018.jpg?20230101015657",
        ),
        # (user_avatar, bot_avatar) - replace bot avatar with something better or remove
    )

    with gr.Row():
        msg = gr.Textbox(
            label="Your Message",
            placeholder="Type your message here and press Enter to send...",
            scale=4,  # Make textbox take more space
            autofocus=True,
        )
        # submit_button = gr.Button("Send", variant="primary", scale=1)  # Optional explicit send button

    clear_button = gr.Button("🗑️ Clear Chat & Reset Conversation")

    async def respond(user_message_text, chat_history):
        """Handle one user submission: run the agent, append its replies.

        Returns:
            The updated chat history and "" to clear the textbox.
        """
        # Ignore empty/whitespace-only input; keep history, clear textbox.
        if not user_message_text.strip():
            return chat_history, ""

        is_first_turn = not chat_history

        ai_utterances = await agent.generate_responses_for_turn(
            user_message_text, is_first_turn
        )

        if ai_utterances:
            # First utterance pairs with the user message; any further
            # utterances (e.g. tool-call notices) get their own bot rows.
            chat_history.append([user_message_text, ai_utterances[0]])
            for extra in ai_utterances[1:]:
                chat_history.append([None, extra])
        else:
            # Agent produced no utterances (error or no action) - fallback.
            chat_history.append(
                [user_message_text, "I don't have a response for that right now."]
            )

        return chat_history, ""

    # Event handlers
    msg.submit(respond, [msg, chatbot], [chatbot, msg])

    # if submit_button:  # If you add an explicit send button
    #     submit_button.click(respond, [msg, chatbot], [chatbot, msg])

    def clear_chat_and_reset_agent():
        """Clear the chat UI and rotate the agent's conversation thread."""
        agent.reset_thread()
        return [], ""  # Clears chatbot UI and textbox

    clear_button.click(clear_chat_and_reset_agent, None, [chatbot, msg], queue=False)

    # Run agent setup when the app starts. Gradio awaits async callbacks on
    # its own running event loop, so pass the coroutine function directly.
    # The original used `lambda: asyncio.run(agent.setup())`, which raises
    # RuntimeError when invoked from an already-running event loop.
    demo.load(agent.setup)

if __name__ == "__main__":
    demo.launch()