tech-envision committed on
Commit fed605d · unverified · 2 parents: 14b9f7a a8ee91f

Merge pull request #26 from EnvisionMindCa/codex/make-vm-asynchronous-and-modify-llm-flow

Files changed (6)
  1. README.md +3 -2
  2. src/__init__.py +8 -2
  3. src/chat.py +67 -18
  4. src/config.py +8 -6
  5. src/tools.py +8 -1
  6. src/vm.py +8 -0
README.md CHANGED
@@ -8,8 +8,9 @@ conversations can be resumed with context. One example tool is included:
 * **execute_terminal** – Executes a shell command inside a persistent Linux VM
   with network access. Use it to read uploaded documents under ``/data`` or run
   other commands. Output from ``stdout`` and ``stderr`` is captured and
-  returned. The VM is created when a chat session starts and reused for all
-  subsequent tool calls.
+  returned. Commands run asynchronously so the assistant can continue
+  responding while they execute. The VM is created when a chat session starts
+  and reused for all subsequent tool calls.
 
 The application injects a robust system prompt on each request. The prompt
 guides the model to plan tool usage, execute commands sequentially and
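
The README passage above describes the tool's behavior; here is a minimal sketch of calling it directly, outside a chat session. This is not part of the commit: the import path `src.tools`, the prior `set_vm()` registration, and the file name `report.txt` are all assumptions.

```python
# Not part of the commit: a minimal sketch of direct tool use.
# Assumes the package is importable as ``src``, that a VM was attached
# beforehand via set_vm(), and that /data/report.txt was uploaded.
from src.tools import execute_terminal

# stdout and stderr are captured and merged into the returned string.
output = execute_terminal("cat /data/report.txt")
print(output)
```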
src/__init__.py CHANGED
@@ -1,5 +1,11 @@
 from .chat import ChatSession
-from .tools import execute_terminal, set_vm
+from .tools import execute_terminal, execute_terminal_async, set_vm
 from .vm import LinuxVM
 
-__all__ = ["ChatSession", "execute_terminal", "set_vm", "LinuxVM"]
+__all__ = [
+    "ChatSession",
+    "execute_terminal",
+    "execute_terminal_async",
+    "set_vm",
+    "LinuxVM",
+]
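
With the widened `__all__`, both the blocking and non-blocking entry points are re-exported from the package root. A quick usage sketch, assuming the package is importable as `src` (the command is a placeholder):

```python
import asyncio

from src import execute_terminal_async  # re-exported by this commit

async def main() -> None:
    # Returns the same captured stdout/stderr string as execute_terminal,
    # but yields to the event loop while the command runs.
    print(await execute_terminal_async("echo hello"))

asyncio.run(main())
```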
src/chat.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from typing import List
 import json
+import asyncio
 import shutil
 from pathlib import Path
 
@@ -26,7 +27,7 @@ from .db import (
 )
 from .log import get_logger
 from .schema import Msg
-from .tools import execute_terminal, set_vm
+from .tools import execute_terminal, execute_terminal_async, set_vm
 from .vm import VMRegistry
 
 _LOG = get_logger(__name__)
@@ -141,29 +142,77 @@ class ChatSession:
     ) -> ChatResponse:
         while depth < MAX_TOOL_CALL_DEPTH and response.message.tool_calls:
             for call in response.message.tool_calls:
-                if call.function.name == "execute_terminal":
-                    result = execute_terminal(**call.function.arguments)
-                else:
+                if call.function.name != "execute_terminal":
                     _LOG.warning("Unsupported tool call: %s", call.function.name)
                     result = f"Unsupported tool: {call.function.name}"
-
-                messages.append(
-                    {
-                        "role": "tool",
-                        "name": call.function.name,
-                        "content": str(result),
-                    }
-                )
-                DBMessage.create(
-                    conversation=conversation,
-                    role="tool",
-                    content=str(result),
-                )
+                    messages.append(
+                        {
+                            "role": "tool",
+                            "name": call.function.name,
+                            "content": result,
+                        }
+                    )
+                    DBMessage.create(
+                        conversation=conversation,
+                        role="tool",
+                        content=result,
+                    )
+                    continue
 
-            nxt = await self.ask(messages, think=True)
-            self._store_assistant_message(conversation, nxt.message)
-            response = nxt
-            depth += 1
+                exec_task = asyncio.create_task(
+                    execute_terminal_async(**call.function.arguments)
+                )
+                follow_task = asyncio.create_task(self.ask(messages, think=True))
+
+                done, _ = await asyncio.wait(
+                    {exec_task, follow_task},
+                    return_when=asyncio.FIRST_COMPLETED,
+                )
+
+                if exec_task in done:
+                    follow_task.cancel()
+                    try:
+                        await follow_task
+                    except Exception:
+                        pass
+                    result = await exec_task
+                    messages.append(
+                        {
+                            "role": "tool",
+                            "name": call.function.name,
+                            "content": result,
+                        }
+                    )
+                    DBMessage.create(
+                        conversation=conversation,
+                        role="tool",
+                        content=result,
+                    )
+                    nxt = await self.ask(messages, think=True)
+                    self._store_assistant_message(conversation, nxt.message)
+                    response = nxt
+                else:
+                    followup = await follow_task
+                    self._store_assistant_message(conversation, followup.message)
+                    messages.append(followup.message.model_dump())
+                    result = await exec_task
+                    messages.append(
+                        {
+                            "role": "tool",
+                            "name": call.function.name,
+                            "content": result,
+                        }
+                    )
+                    DBMessage.create(
+                        conversation=conversation,
+                        role="tool",
+                        content=result,
+                    )
+                    nxt = await self.ask(messages, think=True)
+                    self._store_assistant_message(conversation, nxt.message)
+                    response = nxt
+
+            depth += 1
 
         return response

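The reworked loop races the shell command against an interim model reply using `asyncio.wait` with `return_when=asyncio.FIRST_COMPLETED`, then branches on which task finished first. A self-contained sketch of that pattern, with stand-in coroutines in place of the real VM and model calls (the sleeps and strings are placeholders):

```python
import asyncio


async def run_command() -> str:
    # Stand-in for execute_terminal_async(...)
    await asyncio.sleep(2.0)
    return "command output"


async def interim_reply() -> str:
    # Stand-in for self.ask(messages, think=True)
    await asyncio.sleep(0.5)
    return "Still working on it..."


async def main() -> None:
    exec_task = asyncio.create_task(run_command())
    follow_task = asyncio.create_task(interim_reply())

    done, _ = await asyncio.wait(
        {exec_task, follow_task},
        return_when=asyncio.FIRST_COMPLETED,
    )

    if exec_task in done:
        # Command finished first: the interim reply is obsolete, so cancel it.
        follow_task.cancel()
        try:
            await follow_task
        except asyncio.CancelledError:
            pass
    else:
        # Interim reply finished first: surface it, then wait for the command.
        print(await follow_task)
    print(await exec_task)


asyncio.run(main())
```

One design note: the commit catches a broad `Exception` when awaiting the cancelled follow-up task; the sketch narrows that to `asyncio.CancelledError`, the only error that cancellation itself raises.
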
src/config.py CHANGED
@@ -13,10 +13,12 @@ UPLOAD_DIR: Final[str] = os.getenv("UPLOAD_DIR", str(Path.cwd() / "uploads"))
 SYSTEM_PROMPT: Final[str] = (
     "You are Starlette, a professional AI assistant with advanced tool orchestration. "
     "Always analyze the user's objective before responding. If tools are needed, "
-    "outline a step-by-step plan and invoke each tool sequentially, waiting for its "
-    "result before proceeding. Retry or adjust commands when they fail and summarize "
-    "important outputs to preserve context. Uploaded files live under /data and are "
-    "accessible via the execute_terminal tool. Continue using tools until you have "
-    "gathered everything required to produce an accurate answer, then craft a clear "
-    "and precise final response that fully addresses the request."
+    "outline a step-by-step plan and invoke each tool sequentially. Shell commands "
+    "execute asynchronously, so provide a brief interim reply while waiting. Once a "
+    "tool returns its result you will receive a tool message and must continue from "
+    "there. If the result arrives before your interim reply is complete, cancel the "
+    "reply and incorporate the tool output instead. Uploaded files live under /data "
+    "and are accessible via the execute_terminal tool. Continue using tools until "
+    "you have gathered everything required to produce an accurate answer, then craft "
+    "a clear and precise final response that fully addresses the request."
 )
src/tools.py CHANGED
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
-__all__ = ["execute_terminal", "set_vm"]
+__all__ = ["execute_terminal", "execute_terminal_async", "set_vm"]
 
 import subprocess
 from typing import Optional
+import asyncio
 
 from .vm import LinuxVM
 
@@ -53,3 +54,9 @@ def execute_terminal(command: str) -> str:
     if completed.stderr:
         output = f"{output}\n{completed.stderr}" if output else completed.stderr
     return output.strip()
+
+
+async def execute_terminal_async(command: str) -> str:
+    """Asynchronously execute a shell command."""
+    loop = asyncio.get_running_loop()
+    return await loop.run_in_executor(None, execute_terminal, command)
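
`execute_terminal_async` offloads the blocking call to the event loop's default thread-pool executor, so several commands can be awaited concurrently. A minimal sketch, assuming the package is importable as `src` (the commands are placeholders):

```python
import asyncio

from src.tools import execute_terminal_async

async def main() -> None:
    # Each call blocks a worker thread rather than the event loop, so
    # gather() overlaps them up to the default thread-pool size.
    outputs = await asyncio.gather(
        execute_terminal_async("uname -a"),
        execute_terminal_async("ls /data"),
    )
    for out in outputs:
        print(out)

asyncio.run(main())
```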
src/vm.py CHANGED
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 import subprocess
+import asyncio
+from functools import partial
 import uuid
 from pathlib import Path
 
@@ -90,6 +92,12 @@ class LinuxVM:
         output = f"{output}\n{completed.stderr}" if output else completed.stderr
         return output.strip()
 
+    async def execute_async(self, command: str, *, timeout: int = 3) -> str:
+        """Asynchronously execute ``command`` inside the running VM."""
+        loop = asyncio.get_running_loop()
+        func = partial(self.execute, command, timeout=timeout)
+        return await loop.run_in_executor(None, func)
+
     def stop(self) -> None:
         """Terminate the VM if running."""
         if not self._running:
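
`loop.run_in_executor` only forwards positional arguments, which is why `execute_async` binds the keyword-only `timeout` with `functools.partial` before handing off. A usage sketch; the `LinuxVM` constructor and start-up sequence are not shown in this diff, so those details are assumptions:

```python
import asyncio

from src.vm import LinuxVM

async def main() -> None:
    vm = LinuxVM()  # assumption: construction/start-up not shown in this diff
    try:
        # timeout is keyword-only; execute_async wraps it with partial()
        # so the executor receives a callable taking no keyword arguments.
        print(await vm.execute_async("echo ready", timeout=5))
    finally:
        vm.stop()

asyncio.run(main())
```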