Commit c4acae9
Parent: 317bb3f
change the logic for showing AIMessages

app.py CHANGED
@@ -99,21 +99,29 @@ async def chat_logic(
     # app_input["github_repo"] = github_repo
     # Stream the graph's response.
     # This revised logic handles intermediate messages and prevents duplication.
+
+    final_reply = None  # buffer for the last AIMessage we see
+
     async for res in app.astream(app_input, config=config, stream_mode="values"):
-
-
-        # We only stream content from AIMessages. Any intermediate AIMessages
-        # (e.g., "I will now use a tool") will be overwritten by subsequent
-        # AIMessages in the UI, so only the final answer is visible.
-        if isinstance(last_message, AIMessage):
-            yield last_message.content
-
-        elif "__interrupt__" in res:
-            # Handle interruptions where the agent needs human feedback.
-            interruption_message = res["__interrupt__"][0]
+        # ── 1) Handle human interrupts immediately ─────────────────────────
+        if "__interrupt__" in res:
             session_state["is_message_command"] = True
-            yield
-
+            # yield the interrupt text straight away
+            yield res["__interrupt__"][0].value
+            return  # stop processing until the user replies
+
+        # ── 2) Remember the latest AIMessage we've seen ────────────────────
+        if "messages" in res:
+            last = res["messages"][-1]
+            if isinstance(last, AIMessage):
+                final_reply = last.content
+
+    # ── 3) After the graph stops, emit the buffered final answer ──────────
+    if final_reply is not None:
+        yield final_reply  # exactly one assistant chunk
+    else:
+        # fail-safe: the graph produced no AIMessage
+        yield "✅ Done"
 
 
 def create_gradio_app():
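For context, a minimal, self-contained sketch of the pattern this hunk introduces: buffer the most recent AIMessage while the graph streams, surface interrupts immediately, and yield exactly one assistant chunk at the end. `fake_astream`, the dataclass stand-ins, and the exact state shape are illustrative assumptions, not the project's real objects; in the app itself the stream comes from `app.astream(..., stream_mode="values")` as shown in the diff, and `AIMessage` presumably comes from LangChain.

```python
# Sketch of the buffered-streaming pattern above, runnable without LangGraph.
# The message and interrupt shapes are assumptions modeled on the diff.
import asyncio
from dataclasses import dataclass


@dataclass
class AIMessage:
    content: str


@dataclass
class HumanMessage:
    content: str


@dataclass
class Interrupt:  # stand-in for the objects under res["__interrupt__"]
    value: str


async def fake_astream(with_interrupt: bool):
    # Each item mimics a full graph-state snapshot ("values" stream mode).
    yield {"messages": [HumanMessage("hi")]}
    yield {"messages": [AIMessage("I will now use a tool")]}  # intermediate
    if with_interrupt:
        yield {"__interrupt__": [Interrupt("Please confirm the tool call")]}
    yield {"messages": [AIMessage("Here is the final answer")]}


async def chat_logic(stream, session_state):
    final_reply = None  # buffer the last AIMessage instead of yielding each one
    async for res in stream:
        # 1) Interrupts win: surface the prompt and stop until the user replies.
        if "__interrupt__" in res:
            session_state["is_message_command"] = True
            yield res["__interrupt__"][0].value
            return
        # 2) Remember only the most recent AIMessage.
        if "messages" in res:
            last = res["messages"][-1]
            if isinstance(last, AIMessage):
                final_reply = last.content
    # 3) Emit exactly one assistant chunk after the stream ends.
    yield final_reply if final_reply is not None else "✅ Done"


async def main():
    for with_interrupt in (False, True):
        async for chunk in chat_logic(fake_astream(with_interrupt), {}):
            print(with_interrupt, "->", chunk)
    # False -> Here is the final answer
    # True  -> Please confirm the tool call


asyncio.run(main())
```

The design choice here: by buffering the last AIMessage and yielding once after the stream ends, intermediate messages like "I will now use a tool" never reach the UI at all, replacing the old approach of yielding every AIMessage and relying on the UI to overwrite earlier ones.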