fix(Dockerfile): use cmd single line
Dockerfile  CHANGED  (+17 -20)
@@ -16,23 +16,20 @@ FROM docker.io/vllm/vllm-openai:v0.10.0
 
 EXPOSE 7860
 
-[… 17 removed lines of the previous CMD (exec/JSON-array form) …]
-"--disable-log-requests",
-"--trust-remote-code"
-]
+CMD vllm serve \
+    --model "meta-llama/Llama-3.2-3B-Instruct" \
+    --task generate \
+    --revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
+    --code-revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
+    --tokenizer-revision "0cb88a4f764b7a12671c53f0838cd831a0843b95" \
+    --seed 42 \
+    --host 0.0.0.0 \
+    --port 7860 \
+    --max-num-batched-tokens 32768 \
+    --max-model-len 32768 \
+    --dtype float16 \
+    --enforce-eager \
+    --gpu-memory-utilization 0.9 \
+    --enable-prefix-caching \
+    --disable-log-requests \
+    --trust-remote-code
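For context on the commit title: the removed CMD used the exec (JSON array) form, while the new one uses the shell form. The Dockerfile parser joins the backslash-continued lines into a single CMD instruction, which Docker then runs through `/bin/sh -c`. A minimal sketch of the two styles, using an abbreviated argument list rather than the full one from this Dockerfile:

# Exec (JSON array) form, as removed: the binary is invoked directly, with no shell
# in between, so shell features such as environment-variable expansion do not apply.
CMD ["vllm", "serve", "--model", "meta-llama/Llama-3.2-3B-Instruct", "--port", "7860"]

# Shell form, as introduced here: the backslash continuations are collapsed into one
# instruction and executed as `/bin/sh -c "vllm serve ..."`. Only one CMD takes
# effect per image; the two forms are shown side by side purely for comparison.
CMD vllm serve \
    --model "meta-llama/Llama-3.2-3B-Instruct" \
    --port 7860

The rewritten CMD also pins --revision, --code-revision, and --tokenizer-revision to the same model commit and fixes a sampling seed (--seed 42), so rebuilding the Space pulls the same model snapshot, and --port 7860 matches the EXPOSE 7860 line above.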