brendon-ai committed on
Commit
de43d85
·
verified ·
1 Parent(s): 5fcc1da

Create startup.sh

Browse files
Files changed (1) hide show
  1. startup.sh +54 -0
startup.sh ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Exit on any error
4
+ set -e
5
+
6
+ echo "Starting Ollama and FastAPI application..."
7
+
8
+ # Create necessary directories
9
+ mkdir -p /tmp/ollama/models
10
+ export OLLAMA_MODELS=/tmp/ollama/models
11
+ export OLLAMA_HOME=/tmp/ollama
12
+
13
+ # Start Ollama server in the background
14
+ echo "Starting Ollama server..."
15
+ ollama serve &
16
+ OLLAMA_PID=$!
17
+
18
+ # Wait for Ollama to be ready
19
+ echo "Waiting for Ollama server to start..."
20
+ for i in {1..30}; do
21
+ if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
22
+ echo "Ollama server is ready!"
23
+ break
24
+ fi
25
+ if [ $i -eq 30 ]; then
26
+ echo "Timeout waiting for Ollama server to start"
27
+ exit 1
28
+ fi
29
+ sleep 2
30
+ done
31
+
32
+ ollama list
33
+
34
+
35
+ # Optional: Pull a model (uncomment and modify as needed)
36
+ # echo "Pulling llama2 model..."
37
+ # ollama pull tinyllama:1.1b
38
+ ollama pull dolphin-llama3:8b
39
+
40
+ # Start FastAPI application
41
+ echo "Starting FastAPI application..."
42
+ python app.py
43
+
44
+ # Keep the script running and handle cleanup
45
+ cleanup() {
46
+ echo "Shutting down..."
47
+ kill $OLLAMA_PID 2>/dev/null || true
48
+ exit 0
49
+ }
50
+
51
+ trap cleanup SIGTERM SIGINT
52
+
53
+ # Wait for background processes
54
+ wait