brendon-ai committed on
Commit
cb7d329
·
verified ·
1 Parent(s): 5d20705

Update startup.sh

Browse files
Files changed (1) hide show
  1. startup.sh +56 -43
startup.sh CHANGED
@@ -1,43 +1,56 @@
1
- #!/bin/bash
2
-
3
- echo "=== Starting Ollama Generate API ==="
4
-
5
- # Set HOME to /tmp to avoid permission issues with /.ollama
6
- export HOME=/tmp
7
-
8
- # Use /tmp to avoid any permission issues
9
- export OLLAMA_HOST=0.0.0.0:11434
10
- export OLLAMA_MODELS=/tmp/ollama/models
11
- export OLLAMA_HOME=/tmp/ollama
12
- export OLLAMA_ORIGINS="*"
13
-
14
- # Create directories
15
- mkdir -p /tmp/ollama/models
16
- mkdir -p /tmp/.ollama
17
- echo "✅ Created Ollama directories in /tmp"
18
-
19
- # Start Ollama server
20
- echo "Starting Ollama server..."
21
- ollama serve &
22
- OLLAMA_PID=$!
23
-
24
- # Wait for server to start
25
- echo "Waiting for Ollama server..."
26
- sleep 10
27
-
28
- # Check if server is running
29
- if curl -s http://localhost:11434/api/tags > /dev/null; then
30
- echo "✅ Ollama server is running!"
31
- else
32
- echo "❌ Ollama server failed to start"
33
- kill $OLLAMA_PID 2>/dev/null || true
34
- exit 1
35
- fi
36
-
37
- # Pull tiny model in background
38
- echo "Pulling TinyLlama model..."
39
- ollama pull tinyllama &
40
-
41
- # Start FastAPI app
42
- echo "Starting FastAPI application..."
43
- python3 app.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use Ubuntu as base image for better Ollama compatibility
FROM ubuntu:22.04

# Prevent interactive prompts during package installation
ENV DEBIAN_FRONTEND=noninteractive

# Set working directory
WORKDIR /app

# Install system dependencies; --no-install-recommends keeps the image lean,
# and clearing the apt lists in the same layer avoids baking them in.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    python3 \
    python3-pip \
    python3-venv \
    wget \
    ca-certificates \
    sudo \
    && rm -rf /var/lib/apt/lists/*

# Create a virtual environment and put it first on PATH so every later
# pip/python call uses it.
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install Python dependencies before copying app code so this layer is
# cached across source-only edits.
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Install Ollama
RUN curl -fsSL https://ollama.ai/install.sh | sh

# No need to create ollama directories - will use /tmp

# Copy application files
COPY app.py .
COPY startup.sh .

# Make startup script executable
RUN chmod +x startup.sh

# Set environment variables for Ollama (using /tmp for guaranteed write access)
ENV HOME=/tmp
ENV OLLAMA_HOST=0.0.0.0:11434
ENV OLLAMA_MODELS=/tmp/ollama/models
ENV OLLAMA_HOME=/tmp/ollama

# Expose FastAPI (7860) and Ollama (11434) ports
EXPOSE 7860 11434

# Health check against the FastAPI /health endpoint
HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Start both Ollama server and FastAPI app
CMD ["./startup.sh"]