brendon-ai committed on
Commit
3b1f63b
·
verified ·
1 Parent(s): 31f18df

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +12 -7
Dockerfile CHANGED
@@ -1,20 +1,25 @@
1
  # Use a Python base image
2
  FROM python:3.9-slim-buster
3
 
4
- # Set working directory
5
  WORKDIR /app
6
 
7
- # Install dependencies
 
 
 
 
 
8
  COPY requirements.txt .
9
  RUN pip install --no-cache-dir -r requirements.txt
10
 
11
  # Copy your application code
12
  COPY app.py .
13
 
14
- # Expose the port your API will run on (Hugging Face Spaces uses 7860 by default for Gradio/Streamlit,
15
- # but for custom Docker, you can choose, e.g., 8000 for FastAPI or 5000 for Flask).
16
  EXPOSE 8000
17
 
18
- # Command to run your FastAPI/Flask app
19
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"] # For FastAPI
20
- # CMD ["python", "app.py"] # For a simple Flask app
 
# syntax=docker/dockerfile:1

# Use a slim Python base image.
# NOTE(review): Debian "buster" is end-of-life (archived apt repos, no more
# security updates); "bookworm" is the maintained slim variant for Python 3.9.
FROM python:3.9-slim-bookworm

# Set working directory inside the container (created automatically).
WORKDIR /app

# Create a virtual environment and put it first on PATH so plain
# `pip` / `python` below resolve inside it — a clean, isolated
# environment for the application's dependencies.
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install dependencies first, copying only the manifest, so this layer
# stays cached until requirements.txt itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code (source edits won't invalidate the deps layer).
COPY app.py .

# Run as an unprivileged user instead of root.
# Hugging Face Spaces convention is a user with UID 1000.
RUN useradd --create-home --uid 1000 user \
    && chown -R user:user /app
USER user

# Document the service port. NOTE(review): Hugging Face Spaces routes to the
# port declared in the Space config (app_port, default 7860) — 8000 must
# match that setting; EXPOSE itself does not publish anything.
EXPOSE 8000

# Start the FastAPI app via `python -m uvicorn`, exec (JSON-array) form:
# uvicorn runs as PID 1 and receives SIGTERM directly on container stop.
CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]