tech-envision committed
Commit · ec335c4 · Parent(s): 99da480

fix: store tool calls as json and avoid duplicate messages
src/chat.py CHANGED (+16 -17)
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from typing import List
+import json
 
 from ollama import AsyncClient, ChatResponse
 
@@ -26,6 +27,19 @@ class ChatSession:
         if not _db.is_closed():
             _db.close()
 
+    @staticmethod
+    def _store_assistant_message(
+        conversation: Conversation, message: ChatResponse.Message
+    ) -> None:
+        """Persist assistant messages, storing tool calls when present."""
+
+        if message.tool_calls:
+            content = json.dumps([c.model_dump() for c in message.tool_calls])
+        else:
+            content = message.content or ""
+
+        Message.create(conversation=conversation, role="assistant", content=content)
+
     async def ask(self, messages: List[Msg], *, think: bool = True) -> ChatResponse:
         return await self._client.chat(
             self._model,
@@ -60,11 +74,7 @@ class ChatSession:
                 content=str(result),
             )
             nxt = await self.ask(messages, think=True)
-            Message.create(
-                conversation=conversation,
-                role="assistant",
-                content=nxt.message.content,
-            )
+            self._store_assistant_message(conversation, nxt.message)
             return await self._handle_tool_calls(
                 messages, nxt, conversation, depth + 1
             )
@@ -77,20 +87,9 @@ class ChatSession:
         messages: List[Msg] = [{"role": "user", "content": prompt}]
         response = await self.ask(messages)
         messages.append(response.message.model_dump())
-        Message.create(
-            conversation=conversation,
-            role="assistant",
-            content=response.message.content,
-        )
+        self._store_assistant_message(conversation, response.message)
 
         _LOG.info("Thinking:\n%s", response.message.thinking or "<no thinking trace>")
 
         final_resp = await self._handle_tool_calls(messages, response, conversation)
-        if final_resp is not response:
-            # final response after handling tool calls
-            Message.create(
-                conversation=conversation,
-                role="assistant",
-                content=final_resp.message.content,
-            )
         return final_resp.message.content
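The new helper centralizes what were three near-identical Message.create call sites, and serializing tool calls with json.dumps keeps them recoverable later instead of persisting an empty content column. Below is a minimal, self-contained sketch of the content-selection branch, assuming (as with ollama's pydantic v2 types) that each tool call exposes model_dump(); FakeToolCall is a stand-in defined here, and the repo's Conversation / Message ORM models are deliberately left out.

    import json
    from dataclasses import dataclass
    from typing import Any, Dict, List, Optional


    @dataclass
    class FakeToolCall:
        """Stand-in for ollama's tool-call type (assumption: it exposes
        model_dump() returning a plain dict, as pydantic v2 models do)."""

        function: Dict[str, Any]

        def model_dump(self) -> Dict[str, Any]:
            return {"function": self.function}


    def assistant_content(
        content: Optional[str], tool_calls: Optional[List[FakeToolCall]]
    ) -> str:
        # Same branch as _store_assistant_message: a tool-call turn is
        # stored as one JSON array; a plain turn stores its text, never None.
        if tool_calls:
            return json.dumps([c.model_dump() for c in tool_calls])
        return content or ""


    calls = [FakeToolCall(function={"name": "get_weather", "arguments": {"city": "Oslo"}})]
    print(assistant_content(None, calls))
    # [{"function": {"name": "get_weather", "arguments": {"city": "Oslo"}}}]
    print(assistant_content("Hello!", None))
    # Hello!

Note the `or ""` fallback: it guarantees the stored content is never None for plain assistant turns.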
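A consequence of storing tool calls as a JSON array is that a replay path can recover them losslessly. The reader below is a hypothetical counterpart, not part of this commit: it assumes rows were written by _store_assistant_message and treats a decodable JSON list as a tool-call turn, everything else as plain text.

    import json
    from typing import Any, List, Union


    def parse_stored_content(content: str) -> Union[str, List[Any]]:
        """Hypothetical reader for rows written by _store_assistant_message:
        a JSON array means a tool-call turn; anything else is plain text."""
        try:
            decoded = json.loads(content)
        except json.JSONDecodeError:
            return content  # ordinary assistant text
        # Only tool-call turns are serialized as lists; other valid JSON
        # (say, a reply that happens to be "42") is still treated as text.
        return decoded if isinstance(decoded, list) else content


    print(parse_stored_content('[{"function": {"name": "get_weather"}}]'))
    print(parse_stored_content("Hello!"))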