# syntax=docker/dockerfile:1

# NOTE(review): ":latest" is not reproducible — pin a specific tag, ideally with
# a digest (huggingface/autotrain-advanced:<version>@sha256:...), for production.
FROM huggingface/autotrain-advanced:latest
# Speed & smaller layers:
#   PIP_NO_CACHE_DIR / PIP_DISABLE_PIP_VERSION_CHECK keep pip layers lean and quiet.
#   HF_HUB_ENABLE_HF_TRANSFER enables the fast Rust-based Hub download backend
#   (requires the hf_transfer package to be importable at runtime).
# NOTE(review): HF_HUB_ENABLE_QUICK_EXTRACT is not a documented huggingface_hub
# environment variable — confirm something actually reads it, otherwise drop it.
ENV PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    HF_HUB_ENABLE_QUICK_EXTRACT=1
# Python deps (install Transformers from a GitHub ZIP so git is not required in
# the image). Pip's cache is already disabled via PIP_NO_CACHE_DIR above, so no
# --no-cache-dir flag is needed here.
# NOTE(review): "refs/heads/main.zip" is a moving target — every build can pull a
# different Transformers revision. Pin a release tag or commit SHA for
# reproducible builds, e.g. .../archive/refs/tags/v4.XX.0.zip.
RUN python -m pip install --upgrade pip setuptools wheel && \
    pip install -U autotrain-advanced && \
    pip install -U "transformers @ https://github.com/huggingface/transformers/archive/refs/heads/main.zip" \
        "tokenizers>=0.20.0" "accelerate>=1.0.0" kernels huggingface_hub
# ---- (GPU) Choose ONE Torch line that matches your host ----
# CUDA 12.1:
# RUN pip install --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio
# CUDA 11.8:
# RUN pip install --index-url https://download.pytorch.org/whl/cu118 torch torchvision torchaudio
# CPU-only:
# RUN pip install --index-url https://download.pytorch.org/whl/cpu torch torchvision torchaudio

# Optional sanity check at build time
# NOTE(review): "openai/gpt-oss-2b" looks like a typo — the published checkpoints
# are gpt-oss-20b / gpt-oss-120b; confirm the intended model id before enabling.
# RUN python - <<'PY'
# from transformers import AutoConfig, __version__; print("Transformers:", __version__)
# AutoConfig.from_pretrained("openai/gpt-oss-2b"); print("OK: gpt_oss recognized")
# PY
# EXPOSE is documentation only — publish with `docker run -p 7860:7860`.
EXPOSE 7860

# NOTE(review): no USER directive, so the app runs as root — add a non-root user
# if the autotrain app does not require root in this base image (TODO confirm).
# Exec-form CMD keeps autotrain as PID 1 so it receives SIGTERM on `docker stop`.
CMD ["autotrain","app","--host","0.0.0.0","--port","7860","--workers","1"]