WNT3D committed
Commit 8a7f0ed · verified · Parent: 253070b

Update Dockerfile

Files changed (1):
  1. Dockerfile (+14, -26)
Dockerfile CHANGED
@@ -1,41 +1,29 @@
- # ---- Base
  FROM huggingface/autotrain-advanced:latest

- # ---- Env (faster/cleaner installs + nicer HF downloads)
+ # Speed & smaller layers
  ENV PIP_NO_CACHE_DIR=1 \
      PIP_DISABLE_PIP_VERSION_CHECK=1 \
      HF_HUB_ENABLE_HF_TRANSFER=1 \
      HF_HUB_ENABLE_QUICK_EXTRACT=1

- # ---- System deps (optional but handy)
- RUN apt-get update && apt-get install -y --no-install-recommends git && rm -rf /var/lib/apt/lists/*
-
- # ---- Python deps
- # NOTE: Install Transformers from source to ensure gpt_oss is recognized.
- # Keep tokenizers/accelerate fresh as well.
+ # Python deps (install Transformers from ZIP so we don't need git)
  RUN python -m pip install --upgrade pip setuptools wheel && \
-     pip install --no-cache-dir -U \
-     autotrain-advanced \
-     "git+https://github.com/huggingface/transformers.git" \
-     "tokenizers>=0.20.0" \
-     "accelerate>=1.0.0" \
-     kernels \
-     huggingface_hub
+     pip install -U autotrain-advanced && \
+     pip install -U "transformers @ https://github.com/huggingface/transformers/archive/refs/heads/main.zip" \
+         "tokenizers>=0.20.0" "accelerate>=1.0.0" kernels huggingface_hub

- # ---- (GPU) Choose the right Torch wheel for your host. Uncomment ONE:
- # CUDA 12.1 example:
- # RUN pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio
- # CUDA 11.8 example:
- # RUN pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cu118 torch torchvision torchaudio
+ # ---- (GPU) Choose ONE Torch line that matches your host ----
+ # CUDA 12.1:
+ # RUN pip install --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio
+ # CUDA 11.8:
+ # RUN pip install --index-url https://download.pytorch.org/whl/cu118 torch torchvision torchaudio
  # CPU-only:
- # RUN pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cpu torch torchvision torchaudio
+ # RUN pip install --index-url https://download.pytorch.org/whl/cpu torch torchvision torchaudio

- # ---- Health check: ensure AutoConfig sees gpt_oss at build time (optional)
+ # Optional sanity check at build time
  # RUN python - <<'PY'
- # from transformers import AutoConfig, __version__
- # print("Transformers:", __version__)
- # AutoConfig.from_pretrained("openai/gpt-oss-2b")  # or your target checkpoint
- # print("OK: gpt_oss recognized")
+ # from transformers import AutoConfig, __version__; print("Transformers:", __version__)
+ # AutoConfig.from_pretrained("openai/gpt-oss-2b"); print("OK: gpt_oss recognized")
  # PY

  EXPOSE 7860
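
A minimal local smoke test for the rebuilt image, sketched under a few assumptions: the autotrain-gptoss tag is arbitrary, the base image lets you override its command with python -c, and the container can reach the Hugging Face Hub to resolve the openai/gpt-oss-2b checkpoint referenced in the commented-out check above:

  docker build -t autotrain-gptoss .
  docker run --rm -p 7860:7860 autotrain-gptoss
  # Same check as the Dockerfile's optional heredoc, run against the finished image:
  docker run --rm autotrain-gptoss \
      python -c "from transformers import AutoConfig, __version__; print('Transformers:', __version__); AutoConfig.from_pretrained('openai/gpt-oss-2b'); print('OK: gpt_oss recognized')"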
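
One trade-off of the new RUN line: installing Transformers from main.zip means every rebuild can pull a different snapshot of the main branch. Once gpt_oss support ships in a tagged release, the same direct-URL form can point at a fixed archive instead, e.g. "transformers @ https://github.com/huggingface/transformers/archive/refs/tags/v4.55.0.zip" (tag shown only as an illustration), which keeps image builds reproducible.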