faq / startup.sh
brendon-ai's picture
Update startup.sh
4974b02 verified
raw
history blame
1.47 kB
#!/bin/bash
# Launch an Ollama API server in the background, wait for it to become
# ready, then run the main Python application in the foreground.
#
# Strict mode: exit on error, error on unset variables, fail pipelines
# if any stage fails.
set -euo pipefail
echo "=== Starting Ollama API Server ==="
# Configure Ollama: listen on all interfaces, keep state under /app/.ollama,
# and allow cross-origin API requests from any origin.
export OLLAMA_HOST=0.0.0.0:11434
export OLLAMA_MODELS=/app/.ollama/models
export OLLAMA_HOME=/app/.ollama
export OLLAMA_ORIGINS="*"
# Create the models directory if it doesn't exist (reuse the exported path
# so the two can never drift apart).
mkdir -p "$OLLAMA_MODELS"
# Start the Ollama server in the background and remember its PID so it can
# be shut down when the main application exits.
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
echo "Ollama server started with PID: $OLLAMA_PID"
# Function to check if Ollama is ready
# Probe the local Ollama API endpoint; exit status is curl's status, so the
# function succeeds once the server starts answering /api/tags. All output
# (response body and any curl diagnostics) is discarded.
check_ollama() {
  curl -s -o /dev/null http://localhost:11434/api/tags 2>/dev/null
}
# Wait (up to MAX_WAIT seconds, polling once per second) for the Ollama
# server to answer its API; abort the whole script if it never comes up.
echo "Waiting for Ollama server to be ready..."
MAX_WAIT=60
WAIT_COUNT=0
while ! check_ollama; do
  if [ "$WAIT_COUNT" -ge "$MAX_WAIT" ]; then
    echo "❌ Ollama server failed to start within $MAX_WAIT seconds"
    # Best-effort shutdown of the background server before bailing out.
    kill "$OLLAMA_PID" 2>/dev/null || true
    exit 1
  fi
  echo "⏳ Waiting for Ollama server... ($((WAIT_COUNT + 1))/$MAX_WAIT)"
  sleep 1
  WAIT_COUNT=$((WAIT_COUNT + 1))
done
echo "✅ Ollama server is ready!"
# Optional: pull a small model for testing. Runs in the background so the
# main application is not blocked; a failed pull is non-fatal by design.
echo "Pulling a lightweight model (this may take a few minutes)..."
ollama pull tinyllama &
# Start the main application in the foreground. Capture its exit status
# with `|| ...` instead of letting `set -e` kill the script — otherwise a
# failing app would skip the Ollama cleanup below entirely.
echo "Starting main application..."
APP_STATUS=0
python3 app.py || APP_STATUS=$?
# The main app has exited: report it, stop the background Ollama server,
# and propagate the application's exit status to our caller.
echo "❌ Main application exited unexpectedly"
kill "$OLLAMA_PID" 2>/dev/null || true
exit "$APP_STATUS"