# llm_fastapi / Dockerfile
# Source: Hugging Face Space by sreejith8100, revision fabcfe8 (verified), 1.04 kB
# Base image with PyTorch 2.4.0 + CUDA 12.1 (pinned tag for reproducibility)
FROM pytorch/pytorch:2.4.0-cuda12.1-cudnn8-runtime

# Install system dependencies while still root — apt-get fails as a non-root
# user. Combine update+install in one layer (avoids the stale apt-cache bug)
# and drop the package lists in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
      git \
      wget \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user and switch to it; every step below runs unprivileged.
RUN useradd -m -u 1000 user
USER user
WORKDIR /app

# Runtime environment:
#  - PATH: `pip install --user` places console scripts in ~/.local/bin
#  - HF_HOME: Hugging Face cache rooted inside /app (writable by `user`);
#    TRANSFORMERS_CACHE is deprecated but kept for older transformers releases
#  - TORCH_CUDA_ARCH_LIST: compile CUDA kernels for Ampere (SM 8.0) + PTX fallback
#  - MODEL_DIR: location of the model weights pre-downloaded below
ENV PATH="/home/user/.local/bin:$PATH" \
    HF_HOME=/app/.cache/huggingface \
    TRANSFORMERS_CACHE=/app/.cache/huggingface \
    TORCH_CUDA_ARCH_LIST="8.0+PTX" \
    MODEL_DIR=/app/models/minicpmv

# Copy only the dependency manifest first so the pip layer stays cached until
# requirements.txt itself changes; --no-cache-dir avoids baking pip's cache
# into the layer.
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt

# Pre-download the MiniCPM-V-4 model at build time so the container can start
# serving without hitting the Hugging Face Hub at runtime.
RUN python -c "\
from huggingface_hub import snapshot_download; \
snapshot_download('openbmb/MiniCPM-V-4', local_dir='/app/models/minicpmv', local_dir_use_symlinks=False)"

# Copy application code last — it changes most often, so earlier layers stay cached.
COPY --chown=user . .

# FastAPI port (documentation only; publish with -p/-P at run time).
EXPOSE 7860

# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]