# chatbot/app.py
import subprocess
import json

import gradio as gr
from agno.agent import Agent
from agno.models.openai.like import OpenAILike

# Launch the local llama.cpp server (it exposes an OpenAI-compatible API on
# port 8000) in the background before the Gradio app starts.
subprocess.Popen("bash /home/user/app/start.sh", shell=True)
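# For reference, a minimal sketch of the kind of start.sh this expects,
# assuming llama.cpp's `llama-server` binary (the actual script is not part
# of this file; the model path is a placeholder):
#
#   #!/bin/bash
#   ./llama-server -m /path/to/model.gguf --host 0.0.0.0 --port 8000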

# Point the agent at the local OpenAI-compatible endpoint served by start.sh.
agent = Agent(
    model=OpenAILike(
        id="model",
        api_key="no-token",
        base_url="http://0.0.0.0:8000/v1",
    )
)
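
# Hedged note (an assumption about the agno API, not wired up here): agno
# Agents can also accept plain Python callables as tools, e.g.
#   Agent(model=..., tools=[browser_search, code_interpreter])
# where `browser_search` and `code_interpreter` are hypothetical standalone
# functions mirroring the branches of handle_function_call below.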


def handle_function_call(function_name, arguments):
    """Handle tool/function calls emitted by the model."""
    if function_name == "browser_search":
        # Implement your browser search logic here
        query = arguments.get("query", "")
        max_results = arguments.get("max_results", 5)
        return f"Search results for '{query}' (max {max_results} results): [Implementation needed]"
    elif function_name == "code_interpreter":
        # Implement your code interpreter logic here
        code = arguments.get("code", "")
        if not code:
            return "No code provided to execute."
        return f"Code interpreter results for '{code}': [Implementation needed]"
    return f"Unknown function: {function_name}"


def respond(
    message,
    history: list[tuple[str, str]] | None = None,
    system_message=None,
):
    # Rebuild the OpenAI-style message list from the Gradio chat history.
    # (A mutable default like `history=[]` is a Python pitfall, so default
    # to None instead.)
    messages = []
    if system_message:
        messages.append({"role": "system", "content": system_message})
    for user, assistant in history or []:
        if user:
            messages.append({"role": "user", "content": user})
        if assistant:
            messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
output = ""
try:
print("messages", messages)
stream = agent.run(messages=messages, stream=True)
for chunk in stream:
print("chunk", chunk)
output += chunk.content
yield output
except Exception as e:
print(f"[Error] {e}")
yield "⚠️ Llama.cpp server error"

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch(show_api=False)