ngxson (HF Staff) committed
Commit c39ce8e · verified · 1 Parent(s): 6531fa2

Update Dockerfile

Files changed (1):
  1. Dockerfile  +26 -4
Dockerfile CHANGED

@@ -1,11 +1,33 @@
-FROM ghcr.io/ggerganov/llama.cpp:server-cuda AS build
+ARG UBUNTU_VERSION=22.04
+# This needs to generally match the container host's environment.
+ARG CUDA_VERSION=12.6.0
+# Target the CUDA build image
+ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
+# Target the CUDA runtime image
+ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
+
+FROM ${BASE_CUDA_DEV_CONTAINER} AS build
+
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default
+
+RUN apt-get update && \
+    apt-get install -y build-essential git cmake libcurl4-openssl-dev
+
+WORKDIR /app
+
+RUN git clone https://github.com/ggerganov/llama.cpp --depth 1 .
+
+# Use the default CUDA archs if not specified
+RUN LDFLAGS="-cudart=static -lcublas_static -lcublasLt_static -lculibos" GGML_CUDA=1 LLAMA_CURL=1 LLAMA_CUDA=1 make llama-server
+
+
 FROM node:22 AS runtime
 
 RUN npm i -g http-server
 
 WORKDIR /app
-COPY --from=build /libggml.so /app/libggml.so
-COPY --from=build /libllama.so /app/libllama.so
-COPY --from=build /llama-server /app/llama-server
+COPY --from=build /app/llama-server /app/llama-server
 
 CMD ["http-server", "/app", "-p", "7860", "-c-1"]
+
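
For reference, a minimal sketch of how this image might be built and run locally. The image tag "llama-server-demo" and the host port mapping are illustrative assumptions, not part of the commit; the --build-arg values shown simply restate the Dockerfile's own defaults.

    # Build, optionally pinning CUDA to the host's version
    # ("llama-server-demo" is a made-up example tag).
    docker build -t llama-server-demo \
      --build-arg CUDA_VERSION=12.6.0 \
      --build-arg CUDA_DOCKER_ARCH=default .

    # Run. Note the final stage's CMD serves /app over HTTP on port 7860
    # with caching disabled (-c-1), so the compiled /app/llama-server
    # binary is exposed as a downloadable file rather than started
    # directly; GPU flags would only matter if llama-server itself ran.
    docker run -p 7860:7860 llama-server-demo

The two-stage layout keeps the heavy CUDA devel image out of the final artifact: compilation happens in the nvidia/cuda build stage, and only the resulting llama-server binary is copied into the small node:22 runtime stage.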