# syntax=docker/dockerfile:1
# llama32-3b-instruct / Dockerfile
# Author: yusufs — commit a48cf7b (verified): "fix(entrypoint) Dockerfile"
# (header reconstructed from Hugging Face web UI scrape: "raw / history / blame", 1.46 kB)
# Pinned vLLM OpenAI-compatible server image (explicit tag, no :latest).
FROM docker.io/vllm/vllm-openai:v0.10.0
# Model to serve; MODEL_REV pins an exact HF commit so the image is reproducible.
# Kept as ENV (not ARG) because they are read at container start by the entrypoint.
ENV MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
ENV MODEL_REV="0cb88a4f764b7a12671c53f0838cd831a0843b95"
# Download at build time,
# to ensure during restart we won't have to wait for the download from HF (only wait for docker pull).
# In Docker Spaces, the secrets management is different for security reasons.
# Once you create a secret in the Settings tab,
# you can expose the secret by adding the following line in your Dockerfile:
#
# For example, if SECRET_EXAMPLE is the name of the secret you created in the Settings tab,
# you can read it at build time by mounting it to a file, then reading it with $(cat /run/secrets/SECRET_EXAMPLE).
# https://huggingface.co/docs/hub/en/spaces-sdks-docker#buildtime
#
# AFTER TRIAL AND ERROR WE GOT 16GB (16431849854 bytes) OF LAYERS :(
#
# RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true HF_TOKEN=$(cat /run/secrets/HF_TOKEN) python /app/download_model.py
# Documentation only (does not publish the port); 7860 is the port HF Docker
# Spaces expects the app to listen on.
EXPOSE 7860
# NOTE: the previous ENTRYPOINT ["vllm","serve"] + multi-line CMD [...] was broken
# twice over:
#   1. A JSON-array CMD must be a single logical line — splitting it across lines
#      without `\` continuations is a Dockerfile syntax error.
#   2. Exec (JSON) form does no shell processing, so "$MODEL_NAME"/"$MODEL_REV"
#      would have been passed to vllm as literal strings, never expanded.
# Fix: run through `sh -c` so the ENV vars expand at container start, and `exec`
# so vllm replaces the shell as PID 1 (receives SIGTERM from `docker stop`).
# "$@" forwards any extra args given via `docker run <image> <args>` (CMD),
# preserving the old append-arguments behavior.
ENTRYPOINT ["/bin/sh", "-c", "exec vllm serve \
    --model \"$MODEL_NAME\" \
    --task generate \
    --revision \"$MODEL_REV\" \
    --code-revision \"$MODEL_REV\" \
    --tokenizer-revision \"$MODEL_REV\" \
    --seed 42 \
    --host 0.0.0.0 \
    --port 7860 \
    --max-num-batched-tokens 32768 \
    --max-model-len 32768 \
    --dtype float16 \
    --enforce-eager \
    --gpu-memory-utilization 0.9 \
    --enable-prefix-caching \
    --disable-log-requests \
    --trust-remote-code \
    \"$@\"", "--"]