# docker-compose.example.nvidia.yml
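#
# Example stack: Ollama (GPU and CPU) behind a model-routing proxy, published
# through a Cloudflare tunnel, with a watchdog service for the GPU container.
# The `runtime: nvidia` setting and the GPU device reservation below assume
# the NVIDIA Container Toolkit is installed on the host.
#
# A minimal usage sketch (assumptions, adjust to your setup): the file is
# copied to docker-compose.yml so the watchdog's ./docker-compose.yml mount
# resolves, and the project name matches COMPOSE_PROJECT_NAME used below:
#   cp docker-compose.example.nvidia.yml docker-compose.yml
#   API_KEY=change-me docker compose -p ollama-proxy up -d
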
services:
  # Ollama GPU Service
  ollama-gpu:
    image: ollama/ollama:0.12.0
    pull_policy: if_not_present
    restart: unless-stopped
    runtime: nvidia
    volumes:
      - ollama_gpu_data:/root/.ollama
      - ./ollama/ollama.json:/root/.ollama/ollama.json
      - ./models:/models # Custom GGUF models and Modelfiles
    networks:
      - ollama_network
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - OLLAMA_DEBUG=1
      - OLLAMA_KEEP_ALIVE=60m
      - OLLAMA_VERBOSE=1
      - OLLAMA_HOST=0.0.0.0:11434
      - OLLAMA_NUM_PARALLEL=1
      - OLLAMA_NUM_CTX=16384
      - OLLAMA_NUM_THREAD=8
      - OLLAMA_MLOCK=false
      - OLLAMA_NUM_BATCH=512 # Higher value for better performance
      - OLLAMA_MAX_LOADED_MODELS=1 # Only one model in VRAM at a time
      - OLLAMA_MAX_QUEUE=20 # Allow up to 20 queued requests
      - OLLAMA_NEW_ESTIMATES=1 # Enable improved memory management
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
          memory: 16G
        limits:
          memory: 36G
    shm_size: 4gb # Increase from the default 64MB to 4GB
    oom_kill_disable: true
    command: serve
    ulimits:
      memlock: -1 # Unlimited locked memory

  # Ollama CPU Service
  ollama-cpu:
    image: ollama/ollama:0.12.0
    pull_policy: if_not_present
    restart: unless-stopped
    volumes:
      - ollama_cpu_data:/root/.ollama
      - ./ollama/ollama.json:/root/.ollama/ollama.json
    networks:
      - ollama_network
    environment:
      - OLLAMA_DEBUG=1
      - OLLAMA_KEEP_ALIVE=60m
      - OLLAMA_VERBOSE=1
      - OLLAMA_HOST=0.0.0.0:11434
      - OLLAMA_NUM_PARALLEL=4 # More parallel requests for CPU
      - OLLAMA_NUM_CTX=16384
      - OLLAMA_NUM_THREAD=16 # More threads for CPU processing
      - OLLAMA_MLOCK=false
      - OLLAMA_NUM_BATCH=512
    deploy:
      resources:
        reservations:
          memory: 8G
        limits:
          memory: 16G
    command: serve
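
  # Proxy Service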
  proxy:
    build: .
    restart: unless-stopped
    volumes:
      - ./model-routing.json:/app/model-routing.json:ro
    environment:
      - API_KEY=${API_KEY}
      - OLLAMA_GPU_URL=${OLLAMA_GPU_URL:-http://ollama-gpu:11434}
      - OLLAMA_CPU_URL=${OLLAMA_CPU_URL:-http://ollama-cpu:11434}
    depends_on:
      - ollama-gpu
      - ollama-cpu
    networks:
      - ollama_network

  # Cloudflared Tunnel
  cloudflared:
    image: cloudflare/cloudflared:latest
    restart: unless-stopped
    networks:
      - ollama_network
    volumes:
      - ./cloudflare:/etc/cloudflared
    command: tunnel --no-autoupdate run
    depends_on:
      - proxy

  # GPU Watchdog Service
  watchdog-gpu:
    build: ./watchdog-gpu
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./logs/watchdog:/var/log/watchdog
      - ./docker-compose.yml:/etc/watchdog/docker-compose.yml:ro
    environment:
      - MONITORED_CONTAINER=ollama-proxy-ollama-gpu-1
      - COMPOSE_SERVICE_NAME=ollama-gpu
      - CHECK_INTERVAL=5
      - RESTART_COOLDOWN=60
      - LOG_LEVEL=INFO
      - COMPOSE_FILE=/etc/watchdog/docker-compose.yml
      - COMPOSE_PROJECT_NAME=ollama-proxy
    depends_on:
      - ollama-gpu
    networks:
      - ollama_network
    healthcheck:
      test: ["CMD", "/usr/local/bin/watchdog.sh", "healthcheck"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

volumes:
  ollama_gpu_data:
  ollama_cpu_data:

networks:
  ollama_network: