-
-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathdocker-compose-jetson.yml
More file actions
83 lines (83 loc) · 2.68 KB
/
docker-compose-jetson.yml
File metadata and controls
83 lines (83 loc) · 2.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
---
# Jetson (ARM64 + CUDA) Docker Compose
#
# Prerequisites:
#   - NVIDIA Jetson (Orin NX, AGX Orin, Xavier NX, etc.)
#   - JetPack 5.1+ (L4T R35+) with Docker + NVIDIA Container Runtime
#   - Docker: sudo apt-get install nvidia-container
#
# Build (must be done ON the Jetson — no cross-compilation):
#   docker compose -f docker-compose-jetson.yml build
#
# Run:
#   docker compose -f docker-compose-jetson.yml up -d
#
# For JetPack 5.1 (L4T R35), build with:
#   L4T_TAG=r35.4.1 docker compose -f docker-compose-jetson.yml build
#
# CUDA architecture by Jetson model:
#   - Jetson Orin NX / AGX Orin: 87
#   - Jetson Xavier NX / AGX Xavier: 72
#   - Jetson Nano: 53
#
services:
  ezlocalai:
    container_name: ezlocalai
    build:
      context: .
      dockerfile: jetson.Dockerfile
      # Build args are quoted so the YAML parser always treats the
      # interpolated values as strings, even if an override expands to
      # something number- or boolean-looking (or to nothing at all).
      args:
        L4T_TAG: "${L4T_TAG:-r36.4.0}"
        CUDA_ARCH: "${CUDA_ARCH:-87}"
        TORCH_INDEX: "${TORCH_INDEX:-https://pypi.jetson-ai-lab.dev/simple/}"
    environment:
      - EZLOCALAI_URL=${EZLOCALAI_URL:-http://localhost:8091}
      - EZLOCALAI_API_KEY=${EZLOCALAI_API_KEY:-}
      - DEFAULT_MODEL=${DEFAULT_MODEL:-unsloth/Qwen3.5-4B-GGUF}
      - WHISPER_MODEL=${WHISPER_MODEL:-large-v3}
      - IMG_MODEL=${IMG_MODEL:-}
      - VIDEO_MODEL=${VIDEO_MODEL:-}
      - LLM_BATCH_SIZE=${LLM_BATCH_SIZE:-512}
      - LLM_UBATCH_SIZE=${LLM_UBATCH_SIZE:-256}
      - LLM_MAX_TOKENS=${LLM_MAX_TOKENS:-8192}
      # -1 offloads all layers to the GPU (llama.cpp convention).
      - GPU_LAYERS=${GPU_LAYERS:--1}
      - KV_CACHE_TYPE=${KV_CACHE_TYPE:-f16}
      - CUDA_DOCKER_ARCH=all
      - TOKENIZERS_PARALLELISM=false
      - UVICORN_WORKERS=${UVICORN_WORKERS:-1}
      - MAX_CONCURRENT_REQUESTS=${MAX_CONCURRENT_REQUESTS:-1}
      - MAX_QUEUE_SIZE=${MAX_QUEUE_SIZE:-100}
      - REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-300}
      - TTS_ENABLED=${TTS_ENABLED:-true}
      - STT_ENABLED=${STT_ENABLED:-true}
      - VOICE_SERVER=${VOICE_SERVER:-}
      - VOICE_SERVER_API_KEY=${VOICE_SERVER_API_KEY:-}
      - IMAGE_SERVER=${IMAGE_SERVER:-}
      - IMAGE_SERVER_API_KEY=${IMAGE_SERVER_API_KEY:-}
      - TEXT_SERVER=${TEXT_SERVER:-}
      - TEXT_SERVER_API_KEY=${TEXT_SERVER_API_KEY:-}
      - LAZY_LOAD_VOICE=${LAZY_LOAD_VOICE:-true}
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://localhost:8091/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Generous warm-up window: model loading on Jetson can take minutes.
      start_period: 120s
    logging:
      driver: json-file
      options:
        # Quoted strings: the json-file driver expects string option values.
        max-size: "500m"
        max-file: "5"
    ulimits:
      nofile:
        soft: 65535
        hard: 65535
    # NVIDIA Container Runtime is required for GPU access on Jetson.
    runtime: nvidia
    ports:
      # Quoted to avoid YAML's sexagesimal-integer trap on port mappings.
      - "8091:8091"
    volumes:
      - ./models:/app/models
      - ./hf:/root/.cache/huggingface/hub
      - ./outputs:/app/outputs
      - ./voices:/app/voices
      - ./whispercpp:/app/whispercpp