---
# Docker Compose configuration for the GraphLLM service.
#
# Secrets and tunables are supplied via the host environment or a .env
# file; every `${VAR:-default}` entry falls back to the shown default
# when the variable is unset.

version: '3.8'

services:
  graphllm:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: graphllm
    image: graphllm:latest
    ports:
      # Host 8000 -> container 8000. Quoted to avoid YAML's
      # sexagesimal-number trap on colon-separated port mappings.
      - "8000:8000"
    volumes:
      # Named volumes persist data, uploads, logs and cache across
      # container restarts and re-creation.
      - graphllm-data:/app/data
      - graphllm-uploads:/app/uploads
      - graphllm-logs:/app/logs
      - graphllm-cache:/app/cache
    environment:
      # --- Gemini / LLM provider ---
      - GEMINI_API_KEY=${GEMINI_API_KEY}  # required; deliberately no default
      - GEMINI_MODEL=${GEMINI_MODEL:-gemini-1.5-flash}
      # --- Runtime mode ---
      - ENVIRONMENT=${ENVIRONMENT:-production}
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - DEBUG=false  # pinned off for container deployments
      # --- Generation parameters ---
      - LLM_TEMPERATURE=${LLM_TEMPERATURE:-0.7}
      - LLM_MAX_TOKENS=${LLM_MAX_TOKENS:-2048}
      # --- Embeddings ---
      - EMBEDDING_MODEL=${EMBEDDING_MODEL:-all-MiniLM-L6-v2}
      - EMBEDDING_BATCH_SIZE=${EMBEDDING_BATCH_SIZE:-128}
      # --- API server ---
      - API_HOST=0.0.0.0  # bind all interfaces inside the container
      - API_PORT=8000
      - MAX_FILE_SIZE_MB=${MAX_FILE_SIZE_MB:-50}
    restart: unless-stopped
    healthcheck:
      # NOTE(review): this probe requires `curl` inside the image —
      # confirm the Dockerfile installs it, or the check always fails.
      test: ["CMD", "curl", "-f", "http://localhost:8000/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s  # grace period for model/embedding warm-up
    networks:
      - graphllm-network

volumes:
  graphllm-data:
    driver: local
  graphllm-uploads:
    driver: local
  graphllm-logs:
    driver: local
  graphllm-cache:
    driver: local

networks:
  graphllm-network:
    driver: bridge