brendon-ai commited on
Commit
37348d1
·
verified ·
1 Parent(s): f97c475

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -8
Dockerfile CHANGED
@@ -17,21 +17,17 @@ RUN mkdir -p ${HF_HOME} && chmod -R 777 ${HF_HOME}
17
 
18
  # Install dependencies from requirements.txt into the virtual environment
19
  COPY requirements.txt .
20
-
21
- RUN pip install --upgrade pip
22
-
23
  RUN pip install --no-cache-dir -r requirements.txt
24
 
25
- # Pre-download the TinyLlama model and its tokenizer during the build process.
26
- # This makes the startup faster and ensures the model is available.
27
- # Using torch_dtype=torch.bfloat16 and device_map="auto" for efficient loading.
28
- RUN python -c "import torch; from transformers import AutoTokenizer, AutoModelForCausalLM; model_name='TinyLlama/TinyLlama-1.1B-Chat-v1.0'; AutoTokenizer.from_pretrained(model_name); AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32, device_map='auto')"
29
 
30
  # Copy your application code
31
  COPY app.py .
32
 
33
- # Expose the port your API will run on (Hugging Face Spaces typically uses 7860)
 
34
  EXPOSE 7860
35
 
36
  # Command to start the FastAPI application using Uvicorn as a Python module.
 
37
  CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
17
 
18
  # Install dependencies from requirements.txt into the virtual environment
19
  COPY requirements.txt .
 
 
 
20
  RUN pip install --no-cache-dir -r requirements.txt
21
 
22
+ RUN python -c "from transformers import AutoTokenizer, AutoModelForMaskedLM; AutoTokenizer.from_pretrained('boltuix/NeuroBERT-Tiny'); AutoModelForMaskedLM.from_pretrained('boltuix/NeuroBERT-Tiny')"
 
 
 
23
 
24
  # Copy your application code
25
  COPY app.py .
26
 
27
+ # Expose the port your API will run on
28
+ # Hugging Face Spaces typically uses port 7860 for custom Docker builds
29
  EXPOSE 7860
30
 
31
  # Command to start the FastAPI application using Uvicorn as a Python module.
32
+ # This is more robust as it explicitly invokes 'python' to run the 'uvicorn' module.
33
  CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]