klentyboopathi committed
Commit 24352e2 · 1 Parent(s): 8362005

updated docker

Files changed (4):
  1. Dockerfile +2 -0
  2. bot/bot_websocket_server.py +55 -9
  3. server.py +45 -18
  4. setup.sh +7 -0
Dockerfile CHANGED
@@ -11,8 +11,10 @@ RUN pip install --no-cache-dir -r requirements.txt
 # Copy the backend code
 COPY . .
 
+RUN chmod +x ./setup.sh && ./setup.sh
 # Expose the port the app runs on
 EXPOSE 7860
 
+
 # Run the application
 CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860"]
bot/bot_websocket_server.py CHANGED
@@ -7,16 +7,21 @@ from pipecat.pipeline.pipeline import Pipeline
 from pipecat.pipeline.runner import PipelineRunner
 from pipecat.pipeline.task import PipelineParams, PipelineTask
 from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 
 from pipecat.services.ollama.llm import OLLamaLLMService
 
 # from pipecat.services.fish.tts import FishAudioTTSService
-from pipecat.services.xtts.tts import XTTSService
+# from pipecat.services.xtts.tts import XTTSService
 from pipecat.transcriptions.language import Language
-
+# from service.Dia.tts import DiaTTSService
 
 from pipecat.processors.frameworks.rtvi import RTVIConfig, RTVIObserver, RTVIProcessor
 from pipecat.serializers.protobuf import ProtobufFrameSerializer
+from pipecat.transports.network.fastapi_websocket import (
+    FastAPIWebsocketParams,
+    FastAPIWebsocketTransport,
+)
 from pipecat.services.whisper.stt import WhisperSTTService
 from pipecat.transports.network.websocket_server import (
     WebsocketServerParams,
@@ -24,10 +29,14 @@ from pipecat.transports.network.websocket_server import (
 )
 import aiohttp
 
+from dotenv import load_dotenv
 from service.Kokoro.tts import KokoroTTSService
-from service.orpheus.tts import OrpheusTTSService
+# from service.orpheus.tts import OrpheusTTSService
+
 # from service.chatterbot.tts import ChatterboxTTSService
 
+# from pipecat.utils.tracing.setup import setup_tracing
+
 SYSTEM_INSTRUCTION = f"""
 "You are Gemini Chatbot, a friendly, helpful robot.
 
@@ -38,17 +47,45 @@ Your output will be converted to audio so don't include special characters in yo
 Respond to what the user said in a creative and helpful way. Keep your responses brief. One or two sentences at most.
 """
 
+load_dotenv(override=True)
+
+# IS_TRACING_ENABLED = bool(os.getenv("ENABLE_TRACING"))
+
+# # Initialize tracing if enabled
+# if IS_TRACING_ENABLED:
+#     # Create the exporter
+#     otlp_exporter = OTLPSpanExporter()
+
+#     # Set up tracing with the exporter
+#     setup_tracing(
+#         service_name="pipecat-demo",
+#         exporter=otlp_exporter,
+#         console_export=bool(os.getenv("OTEL_CONSOLE_EXPORT")),
+#     )
+#     logger.info("OpenTelemetry tracing initialized")
+
+
+async def run_bot_websocket_server(websocket_client):
+    # ws_transport = WebsocketServerTransport(
+    #     params=WebsocketServerParams(
+    #         serializer=ProtobufFrameSerializer(),
+    #         audio_in_enabled=True,
+    #         audio_out_enabled=True,
+    #         add_wav_header=False,
+    #         vad_analyzer=SileroVADAnalyzer(),
+    #         session_timeout=60 * 3,  # 3 minutes
+    #     )
+    # )
 
-async def run_bot_websocket_server():
-    ws_transport = WebsocketServerTransport(
-        params=WebsocketServerParams(
-            serializer=ProtobufFrameSerializer(),
+    ws_transport = FastAPIWebsocketTransport(
+        websocket=websocket_client,
+        params=FastAPIWebsocketParams(
             audio_in_enabled=True,
             audio_out_enabled=True,
             add_wav_header=False,
             vad_analyzer=SileroVADAnalyzer(),
-            session_timeout=60 * 3,  # 3 minutes
-        )
+            serializer=ProtobufFrameSerializer(),
+        ),
     )
 
     stt = WhisperSTTService(
@@ -110,6 +147,11 @@ async def run_bot_websocket_server():
     #     model_name="",
    #     sample_rate=16000,
     # )
+
+    # TTS = DiaTTSService(
+    #     model_name="nari-labs/Dia-1.6B",
+    #     sample_rate=16000,
+    # )
     pipeline = Pipeline(
         [
             ws_transport.input(),
@@ -127,8 +169,12 @@ async def run_bot_websocket_server():
         pipeline,
         params=PipelineParams(
             enable_metrics=True,
+            allow_interruptions=True,
             enable_usage_metrics=True,
         ),
+        # enable_turn_tracking=True,
+        # enable_tracing=IS_TRACING_ENABLED,
+        conversation_id="test",
         observers=[RTVIObserver(rtvi)],
     )
 
server.py CHANGED
@@ -1,25 +1,37 @@
-# main.py
+#
+# Copyright (c) 2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
 import asyncio
 import os
+import sys
 from contextlib import asynccontextmanager
 from typing import Any, Dict
 
 import uvicorn
 from dotenv import load_dotenv
-from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
+from fastapi import FastAPI, Request, WebSocket
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
 
-from bot.bot_websocket_server import run_bot_websocket_server
+# from bot.bot_fast_api import run_bot
+from bot.bot_websocket_server import run_bot_websocket_server
 
+
+# Load environment variables
 load_dotenv(override=True)
 
+
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    yield  # Handle startup/shutdown
+    """Handles FastAPI startup and shutdown."""
+    yield  # Run app
+
 
+# Initialize FastAPI app with lifespan manager
 app = FastAPI(lifespan=lifespan)
 
+# Configure CORS to allow requests from any origin
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
@@ -28,28 +40,43 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
+
 @app.websocket("/ws")
 async def websocket_endpoint(websocket: WebSocket):
     await websocket.accept()
     print("WebSocket connection accepted")
     try:
-        await run_bot_websocket_server(websocket)  # Just the handler, not a server
-    except WebSocketDisconnect:
-        print("WebSocket disconnected")
+        await run_bot_websocket_server(websocket)
     except Exception as e:
-        print(f"Error in WebSocket handler: {e}")
+        print(f"Exception in run_bot: {e}")
+
 
 @app.post("/connect")
 async def bot_connect(request: Request) -> Dict[Any, Any]:
-    proto = "ws"
-    if "x-forwarded-proto" in request.headers and request.headers["x-forwarded-proto"] == "https":
-        proto = "wss"
-
-    host = request.headers.get("host", "localhost:7860")
-
-    return {"ws_url": f"{proto}://{host}/ws"}
+    return {"ws_url": "ws://localhost:7860/ws"}
+
+
+async def main():
+    config = uvicorn.Config(app, host="0.0.0.0", port=7860)
+    server = uvicorn.Server(config)
+    await server.serve()
 
-app.mount("/", StaticFiles(directory="static", html=True), name="static")
 
 if __name__ == "__main__":
-    uvicorn.run("server:app", host="0.0.0.0", port=7860, reload=False)
+    import signal
+
+    async def serve():
+        config = uvicorn.Config(app, host="0.0.0.0", port=7860)
+        server = uvicorn.Server(config)
+        await server.serve()
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    try:
+        loop.run_until_complete(serve())
+    except KeyboardInterrupt:
+        print("Received exit signal (Ctrl+C), shutting down...")
+    finally:
+        loop.run_until_complete(loop.shutdown_asyncgens())
+        loop.close()
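Once the server is up (via the container or `uvicorn server:app`), the new endpoints can be smoke-tested as below. This is only a sketch against the defaults shown above (port 7860, hard-coded ws_url); it exercises the HTTP side only, since real audio exchange over /ws requires pipecat's protobuf frame serialization:

# POST /connect returns the websocket URL a client should open
curl -s -X POST http://localhost:7860/connect
# expected response per the handler above: {"ws_url":"ws://localhost:7860/ws"}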
setup.sh ADDED
@@ -0,0 +1,7 @@
+mkdir -p bot/assets
+wget https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files-v1.0/kokoro-v1.0.onnx -O bot/assets/kokoro-v1.0.int8.onnx
+wget https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files-v1.0/kokoro-v1.0.fp16-gpu.onnx -O bot/assets/kokoro-v1.0.fp16-gpu.onnx
+wget https://huggingface.co/NeuML/kokoro-base-onnx/resolve/main/voices.json -O bot/assets/voices.json
+curl -fsSL https://ollama.com/install.sh | sh
+sleep 100
+ollama pull llama3.1
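A minimal post-setup check, assuming the asset paths and model name used in setup.sh above; it only confirms the downloaded files and the pulled Ollama model are present:

# the Kokoro ONNX models and voice file the bot loads from bot/assets
ls -lh bot/assets/kokoro-v1.0.int8.onnx bot/assets/kokoro-v1.0.fp16-gpu.onnx bot/assets/voices.json
# the LLM the bot expects Ollama to serve
ollama list | grep llama3.1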