# SmolVLM2-on-llama.cpp / Dockerfile
# Use Ubuntu 22.04 as a public base image to avoid GHCR permissions
FROM ubuntu:22.04
# Disable interactive prompts and set timezone for tzdata
ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Etc/UTC
# Use bash with strict error handling and command tracing for all RUN steps
SHELL ["/bin/bash", "-euxo", "pipefail", "-c"]
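# (-e: exit on the first error, -u: treat unset variables as errors,
#  -x: echo each command for easier debugging, -o pipefail: fail a pipeline
#  if any command in it fails)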
# 1. Install OS-level dependencies
RUN echo "### STEP 1: Installing OS-level dependencies" && \
apt-get update && \
apt-get install -y --no-install-recommends \
tzdata \
build-essential \
cmake \
libopenblas-dev \
python3 \
python3-pip \
python3-opencv && \
rm -rf /var/lib/apt/lists/*
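# libopenblas-dev supplies the BLAS backend used when llama-cpp-python is
# built from source in step 4; python3-opencv is presumably used by app.py
# for image/video frame handling.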
# 2. Prepare the application directory
RUN echo "### STEP 2: Preparing application directory"
WORKDIR /app
COPY requirements.txt ./
COPY app.py ./
# Copy any other source files or directories needed by your app
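# Note on layer caching: because app.py is copied before the dependency
# install in step 3, editing app.py invalidates the pip layers; copying
# app.py after step 3 would keep dependencies cached across app-only changes.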
# 3. Install Python dependencies (excluding llama-cpp-python)
RUN echo "### STEP 3: Installing Python dependencies" && \
pip3 install --upgrade pip && \
pip3 install --no-cache-dir -r requirements.txt
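# llama-cpp-python is deliberately excluded from requirements.txt so the next
# step can build it from source with the OpenBLAS flags below.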
# 4. Build and install llama-cpp-python from source with OpenBLAS
RUN echo "### STEP 4: Building and installing llama-cpp-python with OpenBLAS" && \
export CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" && \
pip3 install --no-cache-dir --force-reinstall --no-binary llama-cpp-python llama-cpp-python
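# CMAKE_ARGS is read by llama-cpp-python's build backend, and
# --no-binary llama-cpp-python forces a source build so the BLAS flags take
# effect. app.py is then expected to load a GGUF model through this package,
# roughly like the following sketch (model path and parameters are illustrative):
#   from llama_cpp import Llama
#   llm = Llama(model_path="/app/models/smolvlm2.gguf", n_ctx=4096, n_threads=4)
#   out = llm.create_chat_completion(messages=[{"role": "user", "content": "Describe the image."}])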
# 5. Expose port and launch the application
RUN echo "### STEP 5: Finalizing Docker image"
EXPOSE 7860
CMD ["python3", "app.py"]
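# Example local usage (image name is illustrative):
#   docker build -t smolvlm2-llamacpp .
#   docker run --rm -p 7860:7860 smolvlm2-llamacpp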