Commit ce204af
Parent: 5783a36

add: requirements.txt + app renaming

Files changed:
- app.py            +180 -143
- app_new.py        +0 -190
- requirements.txt  +401 -0

app.py  CHANGED
@@ -1,153 +1,190 @@
- import functools
- import os
  import uuid
- import …
  import gradio as gr

- from …
- …
- from …
- …
- )
- …
- # …
- …

- …
-     with gr.Column(scale=1):
-         submit_button = gr.Button("Send", variant="primary")
-         clear_button = gr.Button("Reset Conversation", variant="secondary")
-
-     async def respond(user_message_text, chat_history):
-         if not user_message_text.strip():
-             return (
-                 chat_history,
-                 "",
-             )  # Ignore empty input, return current history and clear textbox
-
-         is_first_turn = not chat_history
-
-         # Append user message to chat_history optimistically for immediate display
-         # Bot response will fill in the 'None' later or add new [None, bot_msg] rows
-         # This makes UI feel more responsive.
-         # chat_history.append([user_message_text, None])  # Temporarily removed for simpler logic below
-
-         ai_utterances = await agent.generate_responses_for_turn(
-             user_message_text, is_first_turn
          )

-         …
          )

-         return chat_history, ""  # Return updated history and clear the textbox
-
-     # Event handlers
-     msg.submit(respond, [msg, chatbot], [chatbot, msg])
-     submit_button.click(respond, [msg, chatbot], [chatbot, msg])
-
-     def clear_chat_and_reset_agent():
-         agent.reset_thread()
-         return [], ""  # Clears chatbot UI and textbox
-
-     def set_environment_variables(github_repo, github_token, trello_api, trello_token, hf_token):
-         # Set environment variables
-         if github_repo:
-             os.environ["GITHUB_REPO"] = github_repo
-         if github_token:
-             os.environ["GITHUB_TOKEN"] = github_token
-         if trello_api:
-             os.environ["TRELLO_API_KEY"] = trello_api
-         if trello_token:
-             os.environ["TRELLO_TOKEN"] = trello_token
-         if hf_token:
-             os.environ["NEBIUS_API_KEY"] = hf_token
-
-         # Create a message showing which variables were set
-         set_vars = []
-         if github_repo: set_vars.append("GITHUB_REPO")
-         if github_token: set_vars.append("GITHUB_TOKEN")
-         if trello_api: set_vars.append("TRELLO_API_KEY")
-         if trello_token: set_vars.append("TRELLO_TOKEN")
-         if hf_token: set_vars.append("NEBIUS_API_KEY")
-
-         if set_vars:
-             return f"✅ Set environment variables: {', '.join(set_vars)}"
-         else:
-             return "⚠️ No environment variables were set"
-
-     # Connect the set environment variables button
-     set_env_button.click(
-         set_environment_variables,
-         inputs=[github_repo, github_token, trello_api, trello_token, hf_token],
-         outputs=[env_status]
-     )
-
-     clear_button.click(clear_chat_and_reset_agent, None, [chatbot, msg], queue=False)
-
-     # Load agent setup when the app starts
-     # Using a lambda to ensure asyncio.run is called within the demo's event loop context if needed
-     demo.load(lambda: asyncio.run(agent.setup()))

  if __name__ == "__main__":
-     …
  import uuid
+ import os
  import gradio as gr

+ from langchain_core.messages import HumanMessage, AIMessage
+
+ # Assuming mcpc_graph.py and its setup_graph function are in the same directory.
+ from mcpc_graph import setup_graph
+
+
+ async def chat_logic(
+     message,
+     history,
+     session_state,
+     github_repo,
+     github_token,
+     trello_api,
+     trello_token,
+     hf_token,
+ ):
+     """
+     Handles the main chat logic, including environment setup and streaming responses.
+
+     Args:
+         message (str): The user's input message.
+         history (list): The chat history managed by Gradio.
+         session_state (dict): A dictionary to maintain state across calls for a session.
+         github_repo (str): The GitHub repository (username/repo).
+         github_token (str): The GitHub personal access token.
+         trello_api (str): The Trello API key.
+         trello_token (str): The Trello API token.
+         hf_token (str): The Hugging Face API token.
+
+     Yields:
+         str: The bot's streaming response or an interruption message.
+     """
+     # Retrieve the initialized graph and interrupt handler from the session state.
+     app = session_state.get("app")
+     human_resume_node = session_state.get("human_resume_node")
+
+     # If the graph is not initialized, this is the first message of the session.
+     # We configure the environment and set up the graph.
+     if app is None:
+         # Check if all required fields have been filled out.
+         if not all([github_repo, github_token, trello_api, trello_token, hf_token]):
+             yield "Error: Please provide all API keys and the GitHub repository in the 'API Configuration' section before starting the chat."
+             return
+
+         # Set environment variables for the current process.
+         os.environ["GITHUB_REPO"] = github_repo
+         os.environ["HUGGINGFACE_API_KEY"] = hf_token
+
+         # Asynchronously initialize the graph and store it in the session state
+         # to reuse it for subsequent messages in the same session.
+         app, human_resume_node = await setup_graph(
+             github_token=github_token, trello_api=trello_api, trello_token=trello_token
+         )
+         session_state["app"] = app
+         session_state["human_resume_node"] = human_resume_node
+
+     # Ensure a unique thread_id for the conversation.
+     thread_id = session_state.get("thread_id")
+     if not thread_id:
+         thread_id = str(uuid.uuid4())
+         session_state["thread_id"] = thread_id
+
+     # Check if the current message is a response to a human interruption.
+     is_message_command = session_state.get("is_message_command", False)
+
+     config = {
+         "configurable": {"thread_id": thread_id},
+         "recursion_limit": 100,
+     }
+
+     if is_message_command:
+         # The user is providing feedback to an interruption.
+         app_input = human_resume_node.call_human_interrupt_agent(message)
+         session_state["is_message_command"] = False
+     else:
+         # A standard user message.
+         app_input = {"messages": [HumanMessage(content=message)]}
+
+     app_input["github_repo"] = github_repo
+     # Stream the graph's response.
+     # This revised logic handles intermediate messages and prevents duplication.
+     async for res in app.astream(app_input, config=config, stream_mode="values"):
+         if "messages" in res:
+             last_message = res["messages"][-1]
+             # We only stream content from AIMessages. Any intermediate AIMessages
+             # (e.g., "I will now use a tool") will be overwritten by subsequent
+             # AIMessages in the UI, so only the final answer is visible.
+             if isinstance(last_message, AIMessage):
+                 yield last_message.content
+
+         elif "__interrupt__" in res:
+             # Handle interruptions where the agent needs human feedback.
+             interruption_message = res["__interrupt__"][0]
+             session_state["is_message_command"] = True
+             yield interruption_message.value
+             return  # Stop the stream and wait for the user's next message.
+
+
+ def create_gradio_app():
+     """Creates and launches the Gradio web application."""
+     print("Launching Gradio app...")
+
+     with gr.Blocks(theme=gr.themes.Soft(), title="LangGraph Multi-Agent Chat") as demo:
+         session_state = gr.State({})
+
+         gr.Markdown(
+             """
+             # LangGraph Multi-Agent Project Manager
+
+             Interact with a multi-agent system powered by LangGraph.
+             You can assign tasks related to Trello and Github.
+             The system can be interrupted for human feedback when it needs to use a tool.
+             """
+         )

+         chatbot = gr.Chatbot(
+             [],
+             elem_id="chatbot",
+             bubble_full_width=False,
+             height=600,
+             label="Multi-Agent Chat",
+             show_label=False,
          )

+         # --- FIX: Added an accordion for API keys and configuration ---
+         with gr.Accordion("API Configuration", open=True):
+             gr.Markdown(
+                 "Please enter your credentials. The agent will be configured when you send your first message."
+             )
+             github_repo = gr.Textbox(
+                 label="GitHub Repo",
+                 placeholder="e.g., username/repository",
+                 info="The target repository for GitHub operations.",
+             )
+             github_token = gr.Textbox(
+                 label="GitHub Token",
+                 placeholder="ghp_xxxxxxxxxxxx",
+                 type="password",
+                 info="A fine-grained personal access token.",
              )
+             trello_api = gr.Textbox(
+                 label="Trello API Key",
+                 placeholder="Your Trello API key",
+                 info="Your API key from trello.com/power-ups/admin.",
+             )
+             trello_token = gr.Textbox(
+                 label="Trello Token",
+                 placeholder="Your Trello token",
+                 type="password",
+                 info="A token generated from your Trello account.",
+             )
+             hf_token = gr.Textbox(
+                 label="Hugging Face Token",
+                 placeholder="hf_xxxxxxxxxxxx",
+                 type="password",
+                 info="Used for tools requiring Hugging Face models.",
+             )
+
+         gr.ChatInterface(
+             fn=chat_logic,
+             chatbot=chatbot,
+             additional_inputs=[
+                 session_state,
+                 github_repo,
+                 github_token,
+                 trello_api,
+                 trello_token,
+                 hf_token,
+             ],
+             title=None,
+             description=None,
+         )
+
+     demo.queue()
+     demo.launch(debug=True)


  if __name__ == "__main__":
+     try:
+         # The main function to create the app is now synchronous.
+         # Gradio handles the async calls within the chat logic.
+         create_gradio_app()
+     except KeyboardInterrupt:
+         print("\nShutting down Gradio app.")
+     except Exception as e:
+         print(f"An error occurred: {e}")
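The whole human-in-the-loop flow above rests on one session flag (is_message_command) plus LangGraph's "__interrupt__" key: the graph yields a question instead of an answer, and the user's next chat message is fed back as resume input rather than as a fresh turn. Below is a minimal, runnable sketch of that handshake. FakeApp, Interrupt, and chat_once are stand-ins invented for illustration, not pmcp or LangGraph APIs; only the flag and the "__interrupt__" key mirror the real code.

import asyncio


class Interrupt:
    """Stand-in for a LangGraph interrupt payload (only .value is used)."""

    def __init__(self, value):
        self.value = value


class FakeApp:
    """Hypothetical graph: interrupts once for approval, then answers."""

    async def astream(self, app_input, config=None, stream_mode="values"):
        if "resume" not in app_input:
            # First turn: ask the human before touching any tool.
            yield {"__interrupt__": [Interrupt("Create the Trello card? (yes/no)")]}
        else:
            yield {"messages": ["Card created."]}


async def chat_once(app, session_state, message):
    # Mirrors chat_logic's branching: a message that arrives while an
    # interrupt is pending is treated as resume input, not a fresh turn.
    if session_state.get("is_message_command"):
        app_input = {"resume": message}
        session_state["is_message_command"] = False
    else:
        app_input = {"messages": [message]}

    async for res in app.astream(app_input):
        if "__interrupt__" in res:
            session_state["is_message_command"] = True
            return res["__interrupt__"][0].value  # surface the question
        if "messages" in res:
            return res["messages"][-1]  # surface the final answer


async def main():
    app, state = FakeApp(), {}
    print(await chat_once(app, state, "File a card for the login bug"))
    print(await chat_once(app, state, "yes"))


asyncio.run(main())

Running it prints the interrupt question on the first call and the final answer on the second, which is exactly the two-turn shape the Gradio chat goes through.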
app_new.py  DELETED
@@ -1,190 +0,0 @@

All 190 lines removed. This is the rename half of the commit: the deleted
app_new.py is line-for-line identical to the new app.py shown above, except
that its line 163 bound the interface to an unused variable
(chat_interface = gr.ChatInterface(...)) where the new app.py simply calls
gr.ChatInterface(...) directly.
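Both the new app.py and the deleted app_new.py hinge on the same Gradio pattern: a per-session gr.State dict passed through additional_inputs and mutated in place, which is how chat_logic caches the graph, the resume node, and the thread_id without any globals. A minimal sketch of that pattern, assuming a Gradio 5.x API as pinned below; the counter example and names are invented for illustration:

import gradio as gr


def count_turns(message, history, state):
    # gr.State hands each browser session its own dict, so in-place
    # mutation persists across turns for that user only: the same
    # mechanism chat_logic uses to cache the graph and thread_id.
    state["turns"] = state.get("turns", 0) + 1
    return f"Turn {state['turns']}: you said {message!r}"


with gr.Blocks() as demo:
    state = gr.State({})
    gr.ChatInterface(fn=count_turns, additional_inputs=[state])

if __name__ == "__main__":
    demo.launch()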
requirements.txt  ADDED
@@ -0,0 +1,401 @@

# This file was autogenerated by uv via the following command:
#    uv export --format requirements-txt --no-hashes
aiofiles==24.1.0
    # via gradio
aiohappyeyeballs==2.6.1
    # via aiohttp
aiohttp==3.12.6
    # via litellm
aiosignal==1.3.2
    # via aiohttp
aiosqlite==0.21.0
    # via langgraph-checkpoint-sqlite
annotated-types==0.7.0
    # via pydantic
anyio==4.9.0
    # via
    #   gradio
    #   httpx
    #   mcp
    #   openai
    #   sse-starlette
    #   starlette
attrs==25.3.0
    # via
    #   aiohttp
    #   jsonschema
    #   referencing
audioop-lts==0.2.1 ; python_full_version >= '3.13'
    # via gradio
beautifulsoup4==4.13.4
    # via markdownify
certifi==2025.4.26
    # via
    #   httpcore
    #   httpx
    #   requests
cffi==1.17.1 ; platform_python_implementation == 'PyPy'
    # via zstandard
cfgv==3.4.0
    # via pre-commit
charset-normalizer==3.4.2
    # via requests
click==8.2.1
    # via
    #   duckduckgo-search
    #   litellm
    #   typer
    #   uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
    # via
    #   click
    #   tqdm
distlib==0.3.9
    # via virtualenv
distro==1.9.0
    # via openai
duckduckgo-search==8.0.2
    # via smolagents
exceptiongroup==1.3.0
    # via fastmcp
fastapi==0.115.12
    # via gradio
fastmcp==2.5.2
    # via pmcp
ffmpy==0.5.0
    # via gradio
filelock==3.18.0
    # via
    #   huggingface-hub
    #   virtualenv
frozenlist==1.6.0
    # via
    #   aiohttp
    #   aiosignal
fsspec==2025.5.1
    # via
    #   gradio-client
    #   huggingface-hub
gradio==5.32.0
    # via pmcp
gradio-client==1.10.2
    # via gradio
grandalf==0.8
    # via pmcp
groovy==0.1.2
    # via gradio
h11==0.16.0
    # via
    #   httpcore
    #   uvicorn
hf-xet==1.1.2 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
    # via huggingface-hub
httpcore==1.0.9
    # via httpx
httpx==0.28.1
    # via
    #   fastmcp
    #   gradio
    #   gradio-client
    #   langgraph-sdk
    #   langsmith
    #   litellm
    #   mcp
    #   openai
    #   pmcp
    #   safehttpx
httpx-sse==0.4.0
    # via mcp
huggingface-hub==0.32.3
    # via
    #   gradio
    #   gradio-client
    #   smolagents
    #   tokenizers
identify==2.6.12
    # via pre-commit
idna==3.10
    # via
    #   anyio
    #   httpx
    #   requests
    #   yarl
importlib-metadata==8.7.0
    # via litellm
jinja2==3.1.6
    # via
    #   gradio
    #   litellm
    #   smolagents
jiter==0.10.0
    # via openai
jsonpatch==1.33
    # via langchain-core
jsonpointer==3.0.0
    # via jsonpatch
jsonref==1.1.0
    # via mcpadapt
jsonschema==4.24.0
    # via litellm
jsonschema-specifications==2025.4.1
    # via jsonschema
langchain-core==0.3.63
    # via
    #   langchain-mcp-adapters
    #   langchain-openai
    #   langgraph
    #   langgraph-checkpoint
    #   langgraph-prebuilt
langchain-mcp-adapters==0.1.1
    # via pmcp
langchain-openai==0.3.18
    # via pmcp
langgraph==0.4.7
    # via pmcp
langgraph-checkpoint==2.0.26
    # via
    #   langgraph
    #   langgraph-checkpoint-sqlite
    #   langgraph-prebuilt
langgraph-checkpoint-sqlite==2.0.10
    # via pmcp
langgraph-prebuilt==0.2.2
    # via langgraph
langgraph-sdk==0.1.70
    # via langgraph
langsmith==0.3.43
    # via langchain-core
litellm==1.72.0
    # via smolagents
lxml==5.4.0
    # via duckduckgo-search
markdown-it-py==3.0.0
    # via rich
markdownify==1.1.0
    # via smolagents
markupsafe==3.0.2
    # via
    #   gradio
    #   jinja2
mcp==1.9.0
    # via
    #   fastmcp
    #   gradio
    #   langchain-mcp-adapters
    #   mcpadapt
    #   pmcp
    #   smolagents
mcpadapt==0.1.9
    # via smolagents
mdurl==0.1.2
    # via markdown-it-py
multidict==6.4.4
    # via
    #   aiohttp
    #   yarl
nodeenv==1.9.1
    # via pre-commit
numpy==2.2.6
    # via
    #   gradio
    #   pandas
openai==1.82.1
    # via
    #   langchain-openai
    #   litellm
openapi-pydantic==0.5.1
    # via fastmcp
orjson==3.10.18
    # via
    #   gradio
    #   langgraph-sdk
    #   langsmith
ormsgpack==1.10.0
    # via langgraph-checkpoint
packaging==24.2
    # via
    #   gradio
    #   gradio-client
    #   huggingface-hub
    #   langchain-core
    #   langsmith
pandas==2.2.3
    # via gradio
pillow==11.2.1
    # via
    #   gradio
    #   smolagents
platformdirs==4.3.8
    # via virtualenv
pre-commit==4.2.0
primp==0.15.0
    # via duckduckgo-search
propcache==0.3.1
    # via
    #   aiohttp
    #   yarl
pycparser==2.22 ; platform_python_implementation == 'PyPy'
    # via cffi
pydantic==2.11.5
    # via
    #   fastapi
    #   gradio
    #   langchain-core
    #   langgraph
    #   langsmith
    #   litellm
    #   mcp
    #   mcpadapt
    #   openai
    #   openapi-pydantic
    #   pydantic-settings
pydantic-core==2.33.2
    # via pydantic
pydantic-settings==2.9.1
    # via mcp
pydub==0.25.1
    # via gradio
pygments==2.19.1
    # via rich
pyparsing==3.2.3
    # via grandalf
python-dateutil==2.9.0.post0
    # via pandas
python-dotenv==1.1.0
    # via
    #   fastmcp
    #   litellm
    #   mcp
    #   mcpadapt
    #   pydantic-settings
    #   smolagents
python-multipart==0.0.20
    # via
    #   gradio
    #   mcp
pytz==2025.2
    # via pandas
pyyaml==6.0.2
    # via
    #   gradio
    #   huggingface-hub
    #   langchain-core
    #   pre-commit
referencing==0.36.2
    # via
    #   jsonschema
    #   jsonschema-specifications
regex==2024.11.6
    # via tiktoken
requests==2.32.3
    # via
    #   huggingface-hub
    #   langsmith
    #   pmcp
    #   requests-toolbelt
    #   smolagents
    #   tiktoken
requests-toolbelt==1.0.0
    # via langsmith
rich==14.0.0
    # via
    #   fastmcp
    #   smolagents
    #   typer
rpds-py==0.25.1
    # via
    #   jsonschema
    #   referencing
ruff==0.11.12
    # via gradio
safehttpx==0.1.6
    # via gradio
semantic-version==2.10.0
    # via gradio
shellingham==1.5.4
    # via typer
six==1.17.0
    # via
    #   markdownify
    #   python-dateutil
smolagents==1.17.0
    # via pmcp
sniffio==1.3.1
    # via
    #   anyio
    #   openai
soupsieve==2.7
    # via beautifulsoup4
sqlite-vec==0.1.6
    # via langgraph-checkpoint-sqlite
sse-starlette==2.3.6
    # via mcp
starlette==0.46.2
    # via
    #   fastapi
    #   gradio
    #   mcp
tenacity==9.1.2
    # via langchain-core
tiktoken==0.9.0
    # via
    #   langchain-openai
    #   litellm
tokenizers==0.21.1
    # via litellm
tomlkit==0.13.2
    # via gradio
tqdm==4.67.1
    # via
    #   huggingface-hub
    #   openai
typer==0.16.0
    # via
    #   fastmcp
    #   gradio
    #   mcp
typing-extensions==4.13.2
    # via
    #   aiosqlite
    #   anyio
    #   beautifulsoup4
    #   exceptiongroup
    #   fastapi
    #   gradio
    #   gradio-client
    #   huggingface-hub
    #   langchain-core
    #   openai
    #   pydantic
    #   pydantic-core
    #   referencing
    #   typer
    #   typing-inspection
typing-inspection==0.4.1
    # via
    #   pydantic
    #   pydantic-settings
tzdata==2025.2
    # via pandas
urllib3==2.4.0
    # via
    #   gradio
    #   requests
uvicorn==0.34.3 ; sys_platform != 'emscripten'
    # via
    #   gradio
    #   mcp
virtualenv==20.31.2
    # via pre-commit
websockets==15.0.1
    # via
    #   fastmcp
    #   gradio-client
xxhash==3.5.0
    # via langgraph
yarl==1.20.0
    # via aiohttp
zipp==3.22.0
    # via importlib-metadata
zstandard==0.23.0
    # via langsmith