brendon-ai committed on
Commit
5cd8a42
·
verified ·
1 Parent(s): c71461d

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +45 -22
Dockerfile CHANGED
@@ -1,33 +1,56 @@
1
- # Use a Python base image
2
- FROM python:3.9-slim-buster
3
 
4
- # Set working directory inside the container
 
 
 
5
  WORKDIR /app
6
 
7
- # Create a virtual environment and activate it.
8
- # This ensures a clean and isolated environment for your dependencies.
9
- RUN python -m venv /opt/venv
 
 
 
 
 
 
 
 
 
10
  ENV PATH="/opt/venv/bin:$PATH"
11
 
12
- # Create a dedicated directory for Hugging Face cache and ensure permissions.
13
- # This prevents 'PermissionError' issues when downloading models by directing
14
- # Hugging Face to a location with guaranteed write access.
15
- ENV HF_HOME=/app/.hf_cache
16
- RUN mkdir -p ${HF_HOME} && chmod -R 777 ${HF_HOME}
17
-
18
- # Install dependencies from requirements.txt into the virtual environment
19
  COPY requirements.txt .
20
- RUN pip install --no-cache-dir -r requirements.txt
 
 
 
 
21
 
22
- RUN python -c "from transformers import AutoTokenizer, AutoModelForMaskedLM; AutoTokenizer.from_pretrained('boltuix/NeuroBERT-Tiny'); AutoModelForMaskedLM.from_pretrained('boltuix/NeuroBERT-Tiny')"
 
 
 
23
 
24
- # Copy your application code
25
  COPY app.py .
 
 
 
 
 
 
 
 
 
 
 
26
 
27
- # Expose the port your API will run on
28
- # Hugging Face Spaces typically uses port 8000 for custom Docker builds
29
- EXPOSE 7860
30
 
31
- # Command to start the FastAPI application using Uvicorn as a Python module.
32
- # This is more robust as it explicitly invokes 'python' to run the 'uvicorn' module.
33
- CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
# syntax=docker/dockerfile:1

# Use Ubuntu as base image for better Ollama compatibility
FROM ubuntu:22.04

# Set working directory
WORKDIR /app

# Install system dependencies.
# DEBIAN_FRONTEND is set per-command (not via ENV) so the noninteractive
# flag does not leak into the container's runtime environment.
# --no-install-recommends keeps the image smaller; packages sorted for diffs.
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        python3 \
        python3-pip \
        python3-venv \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Create a virtual environment and put it first on PATH so that later
# `pip` / `python` invocations use it implicitly.
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install Python dependencies in their own layer (cached until
# requirements.txt changes, independent of app-code edits).
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Install Ollama.
# NOTE(review): piping a remote script straight to sh is unverified
# supply-chain input — pin a release version and/or verify a checksum.
RUN curl -fsSL https://ollama.ai/install.sh | sh

# Create a dedicated ollama user and a writable model directory
RUN useradd -m -s /bin/bash ollama && \
    mkdir -p /home/ollama/.ollama && \
    chown -R ollama:ollama /home/ollama/.ollama

# Copy application files.
# COPY --chmod sets the execute bit in the same layer, avoiding the extra
# `RUN chmod +x` layer the previous version needed.
COPY app.py .
COPY --chmod=0755 startup.sh .

# Runtime configuration for the Ollama server (grouped in one ENV)
ENV OLLAMA_HOST=0.0.0.0:11434 \
    OLLAMA_MODELS=/home/ollama/.ollama/models

# Document the service ports (FastAPI on 7860, Ollama on 11434).
# EXPOSE is documentation only — it does not publish ports.
EXPOSE 7860 11434

# Health check against the FastAPI /health endpoint; -sS keeps the probe
# quiet on success but still reports errors.
HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
    CMD curl -fsS http://localhost:7860/health || exit 1

# Start both the Ollama server and the FastAPI app.
# NOTE(review): two long-running processes in one container — startup.sh
# must `exec` the foreground process (or run under an init like tini) so
# SIGTERM from `docker stop` reaches it; consider separate containers.
# NOTE(review): no USER directive, so everything runs as root — confirm
# whether startup.sh can run as the `ollama` user and add `USER ollama`.
CMD ["./startup.sh"]