# NOTE: the following header is web-viewer residue from the hosting page
# (file: startup.sh, commit 1f9f492, 2.12 kB) — kept as a comment so the
# script remains executable.
#!/bin/bash
# Container entrypoint: supervises an Ollama server plus a FastAPI front-end.
printf '%s\n' "=== Starting Ollama API Server ==="
# Function to check if Ollama is running.
# Returns: 0 when the version endpoint answers successfully, non-zero otherwise.
check_ollama() {
  # -f: treat HTTP error responses as failure (a 500 page is not "up");
  # --max-time 5: don't hang the readiness loop if the port accepts but the
  # server is wedged. The function's status is curl's status — the original
  # trailing `return $?` was redundant.
  curl -sf --max-time 5 http://localhost:11434/api/version > /dev/null
}
# Function to start Ollama server in the background and block until its API
# answers (up to 30 probes, 2 s apart). Exits the script with status 1 on
# timeout. Sets the global OLLAMA_PID.
start_ollama() {
  echo "Starting Ollama server..."
  su - ollama -c "ollama serve" &
  # NOTE(review): $! is the PID of `su`, not of `ollama serve` itself — a TERM
  # sent later may not reach the server process; verify on the target image.
  OLLAMA_PID=$!
  echo "Ollama server started with PID: $OLLAMA_PID"

  # Wait for Ollama to be ready. Track readiness in a flag instead of
  # re-probing after the loop (the original printed success inside the loop
  # and then issued a second, redundant check_ollama probe).
  echo "Waiting for Ollama server to be ready..."
  local i ready=0
  for i in {1..30}; do
    if check_ollama; then
      ready=1
      break
    fi
    echo "⏳ Waiting for Ollama server... ($i/30)"
    sleep 2
  done

  if [[ "$ready" -eq 1 ]]; then
    # (fixed mojibake: the original emitted "βœ…" instead of "βœ…")
    echo "βœ… Ollama server is ready!"
  else
    echo "❌ Ollama server failed to start properly"
    exit 1
  fi
}
# Function to start FastAPI in the background.
# Sets the global FASTAPI_PID; does not wait for the server to become ready.
start_fastapi() {
  printf '%s\n' "Starting FastAPI server..."
  python app.py &
  FASTAPI_PID=$!
  printf 'FastAPI server started with PID: %s\n' "$FASTAPI_PID"
}
# Function to handle shutdown: TERM both services (if they were started),
# reap them, and exit 0. Installed as the SIGTERM/SIGINT trap handler, so it
# must tolerate the trap firing before either PID variable is set — hence the
# ${VAR:-} defaults and the || true on kill (process may already be gone).
cleanup() {
  echo "Shutting down services..."
  if [[ -n "${FASTAPI_PID:-}" ]]; then
    kill "$FASTAPI_PID" 2>/dev/null || true
  fi
  if [[ -n "${OLLAMA_PID:-}" ]]; then
    kill "$OLLAMA_PID" 2>/dev/null || true
  fi
  # Reap the children so they don't linger as zombies during shutdown.
  wait 2>/dev/null || true
  exit 0
}
# Set up signal handlers
# cleanup TERMs both services and exits 0; covers docker stop (SIGTERM) and
# interactive Ctrl-C (SIGINT).
trap cleanup SIGTERM SIGINT
# Create necessary directories
# NOTE(review): mkdir/chown assume the script runs as root and that an
# `ollama` user/group exists in the image — confirm in the Dockerfile.
mkdir -p /home/ollama/.ollama/models
chown -R ollama:ollama /home/ollama/.ollama
# Start Ollama server
# Blocks until the API is ready, or exits 1 on timeout (see start_ollama).
start_ollama
# Start FastAPI server
# Fire-and-forget: returns immediately without a readiness check.
start_fastapi
# Keep the script running and monitor processes
echo "πŸš€ Both services are running!"
echo "- Ollama server: http://localhost:11434"
echo "- FastAPI server: http://localhost:7860"
echo "- API Documentation: http://localhost:7860/docs"
# Monitor both processes: poll every 10 s and exit non-zero as soon as either
# dies, so the container supervisor can restart us. PID expansions are quoted
# (SC2086) — the originals were bare.
while true; do
  # kill -0 probes for process existence without delivering a signal.
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "❌ Ollama server stopped unexpectedly"
    exit 1
  fi
  if ! kill -0 "$FASTAPI_PID" 2>/dev/null; then
    echo "❌ FastAPI server stopped unexpectedly"
    exit 1
  fi
  sleep 10
done