---
# compose.ai-llm.yml
name: cli-node-docker-atlas-lab

# Optional AI LLM layer for Open WebUI, Ollama, and n8n.
# Overlaid on the base lab compose file: the bare `gateway` entry below only
# injects an env var into the service defined there.

# Shared environment fragment merged into each AI service below.
x-lab-env: &lab-env
  TZ: ${TZ}

services:
  # Override for the base gateway service: advertise that the AI layer is on.
  gateway:
    environment:
      ATLAS_AI_LLM_ENABLED: "true"

  # Dedicated Caddy gateway terminating TLS for the AI endpoints.
  gateway-ai-llm:
    build:
      context: ../..
      dockerfile: infra/docker/images/gateway/Dockerfile
      args:
        CADDY_VERSION: ${CADDY_VERSION}
    restart: unless-stopped
    env_file:
      - ../../env/lab.env
    environment:
      ATLAS_GATEWAY_TEMPLATE: Caddyfile.ai-llm.template
      ATLAS_AI_LLM_ENABLED: "true"
      ATLAS_WORKBENCH_ENABLED: "false"
    ports:
      - "${OPENWEBUI_HTTPS_PORT}:${OPENWEBUI_HTTPS_PORT}"
      - "${OLLAMA_HTTPS_PORT}:${OLLAMA_HTTPS_PORT}"
      - "${N8N_HTTPS_PORT}:${N8N_HTTPS_PORT}"
    volumes:
      - gateway-certs:/etc/caddy/certs
      - gateway-config:/etc/caddy/dynamic
      - gateway-data:/data
    depends_on:
      gateway:
        condition: service_started
      n8n:
        condition: service_healthy
      open-webui:
        condition: service_healthy
      ollama:
        condition: service_healthy
    networks:
      - edge-net
      - apps-net
      - ai-llm-net

  # Ollama model server; only reachable through the AI gateway.
  ollama:
    build:
      context: ../..
      dockerfile: infra/docker/images/ollama/Dockerfile
      args:
        OLLAMA_VERSION: ${OLLAMA_VERSION}
    restart: unless-stopped
    gpus: ${OLLAMA_GPU_REQUEST}
    healthcheck:
      # Port probe only — Ollama has no dedicated health endpoint here.
      test: ["CMD-SHELL", "nc -z 127.0.0.1 11434 || exit 1"]
      interval: 15s
      timeout: 10s
      retries: 10
      start_period: 10s
    environment:
      <<: *lab-env
      # Quoted: plain scalars containing ':' are easy to misparse.
      OLLAMA_HOST: "0.0.0.0:11434"
      OLLAMA_ORIGINS: ${OPENWEBUI_URL}
      OLLAMA_RUNTIME_MODELS: ${OLLAMA_RUNTIME_MODELS}
      NVIDIA_VISIBLE_DEVICES: ${OLLAMA_NVIDIA_VISIBLE_DEVICES}
      NVIDIA_DRIVER_CAPABILITIES: ${OLLAMA_NVIDIA_DRIVER_CAPABILITIES}
    volumes:
      - ollama-data:/root/.ollama
      - ../../infra/docker/images/model-sync/sync-ollama-models.sh:/opt/atlas-lab/model-sync/sync-ollama-models.sh:ro
    networks:
      ai-llm-net:
      # Egress network gets gateway priority so the default route uses it.
      services-egress-net:
        gw_priority: 1

  # Open WebUI frontend; talks to Ollama over the internal AI network.
  open-webui:
    image: ghcr.io/open-webui/open-webui:${OPENWEBUI_VERSION}
    restart: unless-stopped
    depends_on:
      ollama:
        condition: service_healthy
    environment:
      <<: *lab-env
      WEBUI_URL: ${OPENWEBUI_URL}
      WEBUI_SECRET_KEY: ${OPENWEBUI_SECRET_KEY}
      CORS_ALLOW_ORIGIN: ${OPENWEBUI_CORS_ALLOW_ORIGIN}
      USER_AGENT: AtlasLab-OpenWebUI/1.0
      ENABLE_OPENAI_API: "false"
      ENABLE_OLLAMA_API: "true"
      OLLAMA_BASE_URL: http://ollama:11434
      RAG_OLLAMA_BASE_URL: http://ollama:11434
      RAG_EMBEDDING_ENGINE: ollama
      RAG_EMBEDDING_MODEL: ${OLLAMA_EMBEDDING_MODEL}
      # Explicit empty strings disable the built-in Docker embedding models.
      USE_EMBEDDING_MODEL_DOCKER: ""
      USE_AUXILIARY_EMBEDDING_MODEL_DOCKER: ""
      AUXILIARY_EMBEDDING_MODEL: ""
      WEBUI_ADMIN_NAME: ${OPENWEBUI_ROOT_NAME}
      WEBUI_ADMIN_EMAIL: ${OPENWEBUI_ROOT_EMAIL}
      WEBUI_ADMIN_PASSWORD: ${OPENWEBUI_ROOT_PASSWORD}
      ENABLE_SIGNUP: "false"
      WEBUI_AUTH: "true"
    volumes:
      - open-webui-data:/app/backend/data
    networks:
      apps-net:
      ai-llm-net:
      services-egress-net:
        gw_priority: 1

  # n8n automation; served via HTTPS behind the AI gateway (one proxy hop).
  n8n:
    build:
      context: ../..
      dockerfile: infra/docker/images/n8n/Dockerfile
      args:
        N8N_VERSION: ${N8N_VERSION}
    restart: unless-stopped
    healthcheck:
      test:
        - CMD-SHELL
        - "node -e \"require('node:http').get('http://127.0.0.1:5678/healthz/readiness', (response) => process.exit(response.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\""
      interval: 10s
      timeout: 5s
      retries: 12
      start_period: 20s
    environment:
      <<: *lab-env
      GENERIC_TIMEZONE: ${TZ}
      N8N_HOST: ${LAB_PUBLIC_HOST}
      N8N_PORT: "5678"
      N8N_PROTOCOL: https
      N8N_EDITOR_BASE_URL: ${N8N_URL}
      WEBHOOK_URL: ${N8N_URL}
      N8N_PROXY_HOPS: "1"
      N8N_SECURE_COOKIE: "true"
      N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
      # Trust the system CA bundle (lab-internal TLS) for Node, curl and OpenSSL.
      SSL_CERT_FILE: /etc/ssl/certs/ca-certificates.crt
      CURL_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
      NODE_OPTIONS: --use-openssl-ca
      NODE_EXTRA_CA_CERTS: /etc/ssl/certs/ca-certificates.crt
      # Keep the instance quiet: no telemetry, update pings, or external hooks.
      N8N_DIAGNOSTICS_ENABLED: "false"
      N8N_VERSION_NOTIFICATIONS_ENABLED: "false"
      N8N_TEMPLATES_ENABLED: "true"
      EXTERNAL_FRONTEND_HOOKS_URLS: ""
      N8N_DIAGNOSTICS_CONFIG_FRONTEND: ""
      N8N_DIAGNOSTICS_CONFIG_BACKEND: ""
    volumes:
      - n8n-data:/home/node/.n8n
    networks:
      ai-llm-net:
      services-egress-net:
        gw_priority: 1

networks:
  edge-net:
    name: cli-node-docker-atlas-lab_edge
  services-egress-net:
    name: cli-node-docker-atlas-lab_services_egress
  apps-net:
    name: cli-node-docker-atlas-lab_apps
    internal: true
  ai-llm-net:
    name: cli-node-docker-atlas-lab_ai_llm
    internal: true

volumes:
  gateway-certs:
  gateway-config:
  gateway-data:
  n8n-data:
  ollama-data:
  open-webui-data: