File size: 1,466 Bytes
1f9f492
 
4974b02
 
 
1f9f492
 
4974b02
 
 
 
 
1f9f492
4974b02
 
1f9f492
4974b02
 
 
 
 
1f9f492
4974b02
 
 
 
1f9f492
 
4974b02
 
 
 
1f9f492
4974b02
 
 
 
 
 
 
 
 
 
 
1f9f492
4974b02
1f9f492
4974b02
 
 
1f9f492
4974b02
 
 
1f9f492
4974b02
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#!/bin/bash

# Entrypoint: launch an Ollama API server in the background, wait for it
# to become ready (below), then run the main Python application.
#
# Required filesystem layout: /app/.ollama must be writable.

# Strict mode: exit on error, on use of an unset variable, and on any
# failure inside a pipeline (plain `set -e` misses the latter two).
set -euo pipefail

echo "=== Starting Ollama API Server ==="

# Set Ollama environment variables
export OLLAMA_HOST=0.0.0.0:11434        # listen on all interfaces, default port
export OLLAMA_MODELS=/app/.ollama/models
export OLLAMA_HOME=/app/.ollama
export OLLAMA_ORIGINS="*"               # allow CORS requests from any origin

# Create models directory if it doesn't exist
mkdir -p /app/.ollama/models

# Start Ollama server in the background and record its PID so the
# readiness loop and the cleanup path below can manage it.
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
echo "Ollama server started with PID: $OLLAMA_PID"
# Function to check if Ollama is ready
check_ollama() {
    curl -s http://localhost:11434/api/tags > /dev/null 2>&1
    return $?
}

# Wait for the Ollama server to answer API requests, giving up (and
# cleaning up the background process) after MAX_WAIT seconds.
echo "Waiting for Ollama server to be ready..."
MAX_WAIT=60
WAIT_COUNT=0

while ! check_ollama; do
    if [ "$WAIT_COUNT" -ge "$MAX_WAIT" ]; then
        echo "❌ Ollama server failed to start within $MAX_WAIT seconds"
        kill "$OLLAMA_PID" 2>/dev/null || true
        exit 1
    fi

    echo "⏳ Waiting for Ollama server... ($((WAIT_COUNT + 1))/$MAX_WAIT)"
    sleep 1
    WAIT_COUNT=$((WAIT_COUNT + 1))
done

echo "✅ Ollama server is ready!"

# Optional: pull a small model for testing (best-effort, runs in the
# background so the app can start immediately).
echo "Pulling a lightweight model (this may take a few minutes)..."
ollama pull tinyllama &

# Start the main application. Capture its exit status instead of letting
# `set -e` abort the script — otherwise the cleanup below would never run
# when the app fails (that was the original bug: the final kill was
# unreachable on app failure).
echo "Starting main application..."
APP_STATUS=0
python3 app.py || APP_STATUS=$?

if [ "$APP_STATUS" -ne 0 ]; then
    echo "❌ Main application exited unexpectedly (status $APP_STATUS)"
fi

# Always stop the background Ollama server and propagate the app's status.
kill "$OLLAMA_PID" 2>/dev/null || true
exit "$APP_STATUS"