faq / Dockerfile
brendon-ai's picture
Update Dockerfile
04f041a verified
raw
history blame
1.35 kB
# syntax=docker/dockerfile:1

# Python 3.9 on Debian bookworm. The previous base, slim-buster, is built on
# Debian 10 which is end-of-life (apt repositories archived), so package
# installs and security updates break. Same Python minor version, so no
# application-level behavior change.
FROM python:3.9-slim-bookworm

# All subsequent relative paths (COPY destinations, CMD cwd) resolve here.
WORKDIR /app

# Create an isolated virtual environment and put it first on PATH so every
# later `pip` / `python` invocation uses it without needing `activate`.
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Direct the Hugging Face cache to a dedicated app-owned directory so model
# downloads never hit a read-only or root-owned default location.
ENV HF_HOME=/app/.hf_cache

# Run the service as a dedicated non-root user (stable UID so runtimes that
# enforce runAsNonRoot can verify it). Least-privilege ownership of the cache
# replaces the previous blanket `chmod -R 777`.
RUN useradd --system --create-home --uid 10001 appuser \
    && mkdir -p ${HF_HOME} \
    && chown -R appuser:appuser ${HF_HOME}

# Copy only the dependency manifest first so the (expensive) install layer is
# cached until requirements.txt itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Pre-download the model into the image so container startup needs no network
# access. Re-own the cache in the same layer: transformers writes lock files
# into HF_HOME at runtime, and the download above runs as root.
RUN python -c "from transformers import AutoTokenizer, AutoModelForMaskedLM; AutoTokenizer.from_pretrained('boltuix/NeuroBERT-Tiny'); AutoModelForMaskedLM.from_pretrained('boltuix/NeuroBERT-Tiny')" \
    && chown -R appuser:appuser ${HF_HOME}

# Application code changes most often — copy it last for best cache reuse.
COPY --chown=appuser:appuser app.py .

# All root-requiring build steps are done; everything from here runs unprivileged.
USER appuser

# Document the serving port. This must match the --port passed to uvicorn in
# CMD below (the original EXPOSE 8000 contradicted the CMD's 7860; Hugging
# Face Spaces' Docker SDK defaults to 7860).
EXPOSE 7860

# Exec-form CMD: uvicorn is PID 1 and receives SIGTERM directly on stop.
# Invoking via `python -m` guarantees the venv's uvicorn module is used.
CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]