Update Dockerfile
Browse files — Dockerfile: +14 −26
Dockerfile
CHANGED
|
@@ -1,41 +1,29 @@
|
|
| 1 |
-
# ---- Base
|
| 2 |
FROM huggingface/autotrain-advanced:latest
|
| 3 |
|
| 4 |
-
#
|
| 5 |
ENV PIP_NO_CACHE_DIR=1 \
|
| 6 |
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
| 7 |
HF_HUB_ENABLE_HF_TRANSFER=1 \
|
| 8 |
HF_HUB_ENABLE_QUICK_EXTRACT=1
|
| 9 |
|
| 10 |
-
#
|
| 11 |
-
RUN apt-get update && apt-get install -y --no-install-recommends git && rm -rf /var/lib/apt/lists/*
|
| 12 |
-
|
| 13 |
-
# ---- Python deps
|
| 14 |
-
# NOTE: Install Transformers from source to ensure gpt_oss is recognized.
|
| 15 |
-
# Keep tokenizers/accelerate fresh as well.
|
| 16 |
RUN python -m pip install --upgrade pip setuptools wheel && \
|
| 17 |
-
pip install
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
"tokenizers>=0.20.0" \
|
| 21 |
-
"accelerate>=1.0.0" \
|
| 22 |
-
kernels \
|
| 23 |
-
huggingface_hub
|
| 24 |
|
| 25 |
-
# ---- (GPU) Choose
|
| 26 |
-
# CUDA 12.1
|
| 27 |
-
# RUN pip install --
|
| 28 |
-
# CUDA 11.8
|
| 29 |
-
# RUN pip install --
|
| 30 |
# CPU-only:
|
| 31 |
-
# RUN pip install --
|
| 32 |
|
| 33 |
-
#
|
| 34 |
# RUN python - <<'PY'
|
| 35 |
-
# from transformers import AutoConfig, __version__
|
| 36 |
-
# print("
|
| 37 |
-
# AutoConfig.from_pretrained("openai/gpt-oss-2b") # or your target checkpoint
|
| 38 |
-
# print("OK: gpt_oss recognized")
|
| 39 |
# PY
|
| 40 |
|
| 41 |
EXPOSE 7860
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# NOTE(review): ":latest" is not reproducible — pin a specific tag (or digest)
# once a known-good autotrain-advanced release is identified.
FROM huggingface/autotrain-advanced:latest

# Speed & smaller layers: no pip cache baked into layers, no pip self-update
# nagging, and hf_transfer-accelerated / quick-extract Hub downloads.
ENV PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    HF_HUB_ENABLE_QUICK_EXTRACT=1

# Python deps (install Transformers from ZIP so we don't need git).
# The GitHub ZIP of main is installed AFTER autotrain-advanced so it wins
# over whatever Transformers pin autotrain pulls in (needed for gpt_oss).
# NOTE(review): refs/heads/main.zip is a moving target — consider pinning a
# commit archive (archive/<sha>.zip) for reproducible builds.
RUN python -m pip install --upgrade pip setuptools wheel && \
    pip install -U autotrain-advanced && \
    pip install -U "transformers @ https://github.com/huggingface/transformers/archive/refs/heads/main.zip" \
        "tokenizers>=0.20.0" \
        "accelerate>=1.0.0" \
        kernels \
        huggingface_hub

# ---- (GPU) Choose ONE Torch line that matches your host ----
# CUDA 12.1:
# RUN pip install --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio
# CUDA 11.8:
# RUN pip install --index-url https://download.pytorch.org/whl/cu118 torch torchvision torchaudio
# CPU-only:
# RUN pip install --index-url https://download.pytorch.org/whl/cpu torch torchvision torchaudio

# Optional sanity check at build time
# RUN python - <<'PY'
# from transformers import AutoConfig, __version__; print("Transformers:", __version__)
# AutoConfig.from_pretrained("openai/gpt-oss-2b"); print("OK: gpt_oss recognized")
# PY

# Documentation only (does not publish the port); the app serves on 7860.
EXPOSE 7860