Spaces:
Running
Running
update dockerfile
Browse files- Dockerfile +19 -17
- postBuild +0 -7
Dockerfile
CHANGED
@@ -1,33 +1,35 @@
|
|
1 |
-
# 1. Start from HF’s official CPU base image
|
2 |
FROM ghcr.io/huggingface/spaces-cpu:latest
|
3 |
|
4 |
-
#
|
|
|
|
|
|
|
|
|
|
|
5 |
RUN apt-get update && \
|
6 |
-
apt-get install -y --no-install-recommends \
|
7 |
-
build-essential cmake libopenblas-dev python3-opencv && \
|
8 |
rm -rf /var/lib/apt/lists/*
|
9 |
|
10 |
-
#
|
|
|
11 |
ENV CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS"
|
12 |
|
13 |
-
#
|
|
|
14 |
WORKDIR /app
|
15 |
-
COPY requirements.txt
|
16 |
COPY app.py ./
|
17 |
-
# (Copy
|
18 |
-
|
19 |
-
# 5. Install Python dependencies
|
20 |
-
# - APT deps from packages.txt
|
21 |
-
RUN xargs -r apt-get update -qq && \
|
22 |
-
xargs -r -a packages.txt apt-get install -y && \
|
23 |
-
rm -rf /var/lib/apt/lists/*
|
24 |
|
25 |
-
#
|
|
|
26 |
RUN pip install --no-cache-dir -r requirements.txt
|
27 |
|
28 |
-
#
|
|
|
29 |
RUN pip install --no-cache-dir --force-reinstall --no-binary llama-cpp-python llama-cpp-python
|
30 |
|
31 |
-
#
|
|
|
32 |
EXPOSE 7860
|
33 |
CMD ["python", "app.py"]
|
|
|
|
|
# NOTE(review): :latest is unpinned — pin to a specific tag/digest once the
# desired base version is confirmed (reproducible builds; hadolint DL3007).
FROM ghcr.io/huggingface/spaces-cpu:latest

# Use bash with strict modes (-e fail fast, -u unset vars, -x trace,
# -o pipefail) for every subsequent RUN step, so build failures are loud.
SHELL ["/bin/bash", "-euxo", "pipefail", "-c"]

# 1. Copy and install OS-level dependencies listed in packages.txt.
#    echo banners are chained inside RUN — a bare `echo` at the top level of a
#    Dockerfile is an invalid instruction and aborts the build.
COPY packages.txt ./
RUN echo "### STEP 1: Installing APT packages" && \
    apt-get update && \
    xargs -r -a packages.txt apt-get install -y --no-install-recommends && \
    rm -rf /var/lib/apt/lists/*

# 2. CMake flags so llama-cpp-python compiles its ggml backend against OpenBLAS.
ENV CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS"

# 3. Set working directory and copy application code.
WORKDIR /app
COPY requirements.txt ./
COPY app.py ./
# (Copy additional code or folders as needed)

# 4. Install Python dependencies (llama-cpp-python handled separately below).
RUN echo "### STEP 4: Installing Python dependencies" && \
    pip install --no-cache-dir -r requirements.txt

# 5. Force a from-source build of llama-cpp-python so the CMAKE_ARGS above
#    take effect (the prebuilt wheel ships without OpenBLAS).
RUN echo "### STEP 5: Building llama-cpp-python from source" && \
    pip install --no-cache-dir --force-reinstall --no-binary llama-cpp-python llama-cpp-python

# 6. Document the serving port (HF Spaces convention) and launch the app.
#    CMD uses exec form so python runs as PID 1 and receives SIGTERM directly.
EXPOSE 7860
CMD ["python", "app.py"]
|
postBuild
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
#!/usr/bin/env bash
|
2 |
-
set -euxo pipefail
|
3 |
-
|
4 |
-
echo ">>> Running postBuild: force-building llama-cpp-python with OpenBLAS"
|
5 |
-
|
6 |
-
export CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS"
|
7 |
-
pip install --no-cache-dir --force-reinstall --no-binary llama-cpp-python llama-cpp-python
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|