brendon-ai committed on
Commit
b760c5e
·
verified ·
1 Parent(s): 543a618

Update startup.sh

Browse files
Files changed (1) hide show
  1. startup.sh +25 -25
startup.sh CHANGED
@@ -3,39 +3,39 @@
3
  # Exit on any error
4
  set -e
5
 
6
- echo "Starting Ollama and FastAPI application..."
7
 
8
- # Create necessary directories
9
- mkdir -p /tmp/ollama/models
10
- export OLLAMA_MODELS=/tmp/ollama/models
11
- export OLLAMA_HOME=/tmp/ollama
12
 
13
- # Start Ollama server in the background
14
- echo "Starting Ollama server..."
15
- ollama serve &
16
- OLLAMA_PID=$!
17
 
18
  # Wait for Ollama to be ready
19
- echo "Waiting for Ollama server to start..."
20
- for i in {1..30}; do
21
- if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
22
- echo "Ollama server is ready!"
23
- break
24
- fi
25
- if [ $i -eq 30 ]; then
26
- echo "Timeout waiting for Ollama server to start"
27
- exit 1
28
- fi
29
- sleep 2
30
- done
31
-
32
- ollama list
33
 
34
 
35
  # Optional: Pull a model (uncomment and modify as needed)
36
  # echo "Pulling llama2 model..."
37
  # ollama pull tinyllama:1.1b
38
- ollama pull HuggingFaceTB/SmolLM3-3B
39
 
40
  #deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
41
 
@@ -46,7 +46,7 @@ python app.py
46
  # Keep the script running and handle cleanup
47
  cleanup() {
48
  echo "Shutting down..."
49
- kill $OLLAMA_PID 2>/dev/null || true
50
  exit 0
51
  }
52
 
 
3
  # Exit on any error
4
  set -e
5
 
6
+ # echo "Starting Ollama and FastAPI application..."
7
 
8
+ # # Create necessary directories
9
+ # mkdir -p /tmp/ollama/models
10
+ # export OLLAMA_MODELS=/tmp/ollama/models
11
+ # export OLLAMA_HOME=/tmp/ollama
12
 
13
+ # # Start Ollama server in the background
14
+ # echo "Starting Ollama server..."
15
+ # ollama serve &
16
+ # OLLAMA_PID=$!
17
 
18
  # Wait for Ollama to be ready
19
+ # echo "Waiting for Ollama server to start..."
20
+ # for i in {1..30}; do
21
+ # if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
22
+ # echo "Ollama server is ready!"
23
+ # break
24
+ # fi
25
+ # if [ $i -eq 30 ]; then
26
+ # echo "Timeout waiting for Ollama server to start"
27
+ # exit 1
28
+ # fi
29
+ # sleep 2
30
+ # done
31
+
32
+ # ollama list
33
 
34
 
35
  # Optional: Pull a model (uncomment and modify as needed)
36
  # echo "Pulling llama2 model..."
37
  # ollama pull tinyllama:1.1b
38
+ # ollama pull HuggingFaceTB/SmolLM3-3B
39
 
40
  #deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
41
 
 
46
  # Keep the script running and handle cleanup
47
  cleanup() {
48
  echo "Shutting down..."
49
+ # kill $OLLAMA_PID 2>/dev/null || true
50
  exit 0
51
  }
52