brendon-ai committed · Commit 62e7314 (verified) · 1 parent: 16bea09

Update startup.sh

Files changed (1)
  1. startup.sh +50 -56
startup.sh CHANGED
@@ -1,56 +1,50 @@
- # Use Ubuntu as base image for better Ollama compatibility
- FROM ubuntu:22.04
-
- # Prevent interactive prompts during package installation
- ENV DEBIAN_FRONTEND=noninteractive
-
- # Set working directory
- WORKDIR /app
-
- # Install system dependencies
- RUN apt-get update && apt-get install -y \
-     curl \
-     python3 \
-     python3-pip \
-     python3-venv \
-     wget \
-     ca-certificates \
-     sudo \
-     && rm -rf /var/lib/apt/lists/*
-
- # Create and activate virtual environment
- RUN python3 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
-
- # Install Python dependencies
- COPY requirements.txt .
- RUN pip install --no-cache-dir --upgrade pip && \
-     pip install --no-cache-dir -r requirements.txt
-
- # Install Ollama
- RUN curl -fsSL https://ollama.ai/install.sh | sh
-
- # No need to create ollama directories - will use /tmp
-
- # Copy application files
- COPY app.py .
- COPY startup.sh .
-
- # Make startup script executable
- RUN chmod +x startup.sh
-
- # Set environment variables for Ollama (using /tmp for guaranteed write access)
- ENV HOME=/tmp
- ENV OLLAMA_HOST=0.0.0.0:11434
- ENV OLLAMA_MODELS=/tmp/ollama/models
- ENV OLLAMA_HOME=/tmp/ollama
-
- # Expose ports
- EXPOSE 7860 11434
-
- # Health check
- HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
-     CMD curl -f http://localhost:7860/health || exit 1
-
- # Start both Ollama server and FastAPI app
- CMD ["./startup.sh"]
+ #!/bin/bash
+
+ # Exit on any error
+ set -e
+
+ echo "Starting Ollama and FastAPI application..."
+
+ # Create necessary directories
+ mkdir -p /tmp/ollama/models
+ export OLLAMA_MODELS=/tmp/ollama/models
+ export OLLAMA_HOME=/tmp/ollama
+
+ # Start Ollama server in the background
+ echo "Starting Ollama server..."
+ ollama serve &
+ OLLAMA_PID=$!
+
+ # Wait for Ollama to be ready
+ echo "Waiting for Ollama server to start..."
+ for i in {1..30}; do
+     if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
+         echo "Ollama server is ready!"
+         break
+     fi
+     if [ $i -eq 30 ]; then
+         echo "Timeout waiting for Ollama server to start"
+         exit 1
+     fi
+     sleep 2
+ done
+
+ # Optional: Pull a model (uncomment and modify as needed)
+ # echo "Pulling llama2 model..."
+ # ollama pull llama2
+
+ # Start FastAPI application
+ echo "Starting FastAPI application..."
+ python app.py
+
+ # Keep the script running and handle cleanup
+ cleanup() {
+     echo "Shutting down..."
+     kill $OLLAMA_PID 2>/dev/null || true
+     exit 0
+ }
+
+ trap cleanup SIGTERM SIGINT
+
+ # Wait for background processes
+ wait
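
One thing to watch in the committed script: `python app.py` runs in the foreground, so the `cleanup` function, the `trap` line, and the final `wait` are only reached after the app has already exited, which means a SIGTERM or SIGINT during normal operation never triggers `cleanup`. Below is a minimal sketch of one possible reordering that registers the handler before anything long-running starts; it reuses the names from the script (`OLLAMA_PID`, `cleanup`), while `APP_PID` is introduced here purely for illustration.

#!/bin/bash
set -e

# Register the handler first, so SIGTERM/SIGINT at any point
# during startup or normal operation actually reaches cleanup().
cleanup() {
    echo "Shutting down..."
    kill $OLLAMA_PID 2>/dev/null || true
    exit 0
}
trap cleanup SIGTERM SIGINT

ollama serve &
OLLAMA_PID=$!

# ... readiness loop as in the committed script ...

# Run the app in the background too; the script then blocks in `wait`,
# where bash can interrupt to run the trap when a signal arrives.
python app.py &
APP_PID=$!
wait $APP_PID || true   # `|| true` keeps set -e from skipping teardown
cleanup

With the app in the background, the script sits in `wait $APP_PID`, and an incoming signal interrupts the `wait` and runs `cleanup`, so the Ollama server is torn down instead of being orphaned. This also matters because the Dockerfile's `CMD ["./startup.sh"]` makes this script PID 1 in the container, the process `docker stop` signals.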