-
Notifications
You must be signed in to change notification settings - Fork 9
Expand file tree
/
Copy pathdocker-compose.oracle.yml
More file actions
218 lines (208 loc) · 6.95 KB
/
docker-compose.oracle.yml
File metadata and controls
218 lines (208 loc) · 6.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
# =============================================================================
# AccountSafe - Oracle Cloud Production Compose (API-only)
# =============================================================================
# Hosts:
# - Frontend: Vercel (accountsafe.vercel.app) - NOT deployed here
# - Backend : Oracle Cloud Ampere A1 VM (this file)
#
# Services on VM:
# db -> Postgres 15 (internal network, not published)
# backend -> Django + Gunicorn (internal network, not published)
# nginx -> TLS termination + reverse proxy for api.<domain> (80/443 public)
# certbot -> Let's Encrypt auto-renewal
# backup -> Automated Postgres dumps (every 6h, 7-day retention)
#
# Usage:
# cp .env.oracle.example .env
# # edit .env (DOMAIN, SECRET_KEY, DB_PASSWORD, etc.)
# make oracle-ssl-init # one time, requires DNS already pointing here
# make oracle-up
# =============================================================================
services:
  # PostgreSQL 15 — internal-only; no `ports:` entry, so it is reachable
  # solely from containers on the `internal` network.
  db:
    image: postgres:15-alpine
    container_name: accountsafe-db
    restart: always
    environment:
      POSTGRES_DB: ${DB_NAME:-accountsafe}
      POSTGRES_USER: ${DB_USER:-postgres}
      # `:?` makes compose fail fast at parse time if the variable is unset.
      POSTGRES_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD is required}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    # Gate for `depends_on: condition: service_healthy` in backend/backup.
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-accountsafe}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - internal
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 1G
        reservations:
          memory: 256M
  # Django + Gunicorn API. Not published to the host; nginx proxies to it
  # over the `external` bridge network.
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: accountsafe-backend
    restart: always
    depends_on:
      db:
        condition: service_healthy
    environment:
      DEBUG: "False"
      SECRET_KEY: ${SECRET_KEY:?SECRET_KEY is required}
      # DOMAIN is the API hostname (e.g. accountsafe.duckdns.org or 140-245-20-107.sslip.io).
      # We always allow the raw IP too so container health checks still work.
      ALLOWED_HOSTS: ${DOMAIN},${PUBLIC_IP},backend,localhost,127.0.0.1
      # Vercel (production) + previews are handled by CORS_ALLOWED_ORIGIN_REGEXES in settings.py,
      # but explicit pinned origins go here.
      CORS_ALLOWED_ORIGINS: ${FRONTEND_ORIGIN:-https://accountsafe.vercel.app}
      # CSRF trust for cross-origin unsafe methods + admin login over HTTPS.
      CSRF_TRUSTED_ORIGINS: https://${DOMAIN},${FRONTEND_ORIGIN:-https://accountsafe.vercel.app}
      DB_NAME: ${DB_NAME:-accountsafe}
      DB_USER: ${DB_USER:-postgres}
      DB_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD is required}
      DB_HOST: db
      DB_PORT: "5432"
      # Optional integrations — empty defaults keep compose from failing
      # when the corresponding feature is not configured.
      EMAIL_HOST_USER: ${EMAIL_HOST_USER:-}
      EMAIL_HOST_PASSWORD: ${EMAIL_HOST_PASSWORD:-}
      DEFAULT_FROM_EMAIL: ${DEFAULT_FROM_EMAIL:-}
      TURNSTILE_SECRET_KEY: ${TURNSTILE_SECRET_KEY:-}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY:-}
      SENTRY_DSN: ${SENTRY_DSN:-}
      APP_VERSION: ${APP_VERSION:-1.0.0}
      # Gunicorn sizing. Ampere A1 (4 OCPU) easily handles 4 workers x 4
      # threads = 16 concurrent. Override in .env if needed.
      GUNICORN_WORKERS: ${GUNICORN_WORKERS:-4}
      GUNICORN_THREADS: ${GUNICORN_THREADS:-4}
      DB_CONN_MAX_AGE: ${DB_CONN_MAX_AGE:-60}
    volumes:
      # Shared with nginx (which mounts them read-only) so nginx can serve
      # collectstatic output and uploaded media directly.
      - static_files:/app/staticfiles
      - media_files:/app/media
    networks:
      # `internal` for DB access (DB stays unreachable from the internet).
      # `external` so the backend itself can make OUTBOUND calls to
      # Cloudflare Turnstile, Gmail SMTP, Sentry, HaveIBeenPwned, etc.
      # (The backend never publishes its port, so outbound access here
      # does not expose it to inbound internet traffic.)
      - internal
      - external
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 2G
        reservations:
          memory: 512M
    # Uses the Dockerfile's default CMD (./docker-entrypoint.sh), which does
    # collectstatic + migrate + gunicorn. No `command:` override here to avoid
    # the YAML-folded-scalar bug that split gunicorn's flags across shell lines.
  # TLS termination + reverse proxy. Only service with published ports.
  nginx:
    image: nginx:1.27-alpine
    container_name: accountsafe-nginx
    restart: always
    depends_on:
      - backend
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Config is mounted as a template; rendered with envsubst in `command`.
      - ./nginx/nginx.oracle.conf:/etc/nginx/templates/default.conf.template:ro
      # Certs are read-only here; the certbot service owns the writable side.
      - ./certbot/conf:/etc/letsencrypt:ro
      - ./certbot/www:/var/www/certbot:ro
      - static_files:/app/staticfiles:ro
      - media_files:/app/media:ro
    environment:
      DOMAIN: ${DOMAIN:?DOMAIN is required}
    networks:
      - internal
      - external
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          memory: 128M
    # The nginx:alpine entrypoint only runs its init scripts (including
    # template rendering via envsubst) when $1 is `nginx` or `nginx-debug`.
    # We override CMD with /bin/sh, so we must render templates ourselves
    # before starting nginx. We also keep a 6-hour reload loop so certbot's
    # renewed Let's Encrypt certs are picked up without restarting.
    # NOTE: `$$` is Compose escaping for a literal `$` seen by the shell;
    # `'$$DOMAIN'` limits envsubst to that one variable so nginx's own
    # `$host`/`$uri` variables in the template survive untouched.
    command:
      - /bin/sh
      - -c
      - |
        envsubst '$$DOMAIN' < /etc/nginx/templates/default.conf.template > /etc/nginx/conf.d/default.conf
        { while :; do sleep 6h & wait $${!}; nginx -s reload 2>/dev/null || true; done; } &
        exec nginx -g 'daemon off;'
  # Let's Encrypt renewal loop. Writes into ./certbot/conf, which nginx
  # mounts read-only; nginx's own 6h reload loop picks up renewed certs.
  certbot:
    image: certbot/certbot:latest
    container_name: accountsafe-certbot
    restart: unless-stopped
    volumes:
      - ./certbot/conf:/etc/letsencrypt
      - ./certbot/www:/var/www/certbot
    # `trap exit TERM` lets `docker stop` terminate the loop promptly, and
    # `sleep 12h & wait` (instead of a foreground sleep) keeps the shell
    # responsive to that signal. `$$` is Compose escaping for a literal `$`.
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew --quiet; sleep 12h & wait $${!}; done;'"
    networks:
      - external
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 256M
        reservations:
          memory: 64M
backup:
image: prodrigestivill/postgres-backup-local:15-alpine
container_name: accountsafe-backup
restart: always
depends_on:
db:
condition: service_healthy
environment:
POSTGRES_HOST: db
POSTGRES_DB: ${DB_NAME:-accountsafe}
POSTGRES_USER: ${DB_USER:-postgres}
POSTGRES_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD is required}
SCHEDULE: "0 */6 * * *"
BACKUP_KEEP_DAYS: 7
BACKUP_KEEP_WEEKS: 4
BACKUP_KEEP_MONTHS: 3
BACKUP_SUFFIX: .sql.gz
HEALTHCHECK_PORT: 8080
volumes:
- ./backups:/backups
networks:
- internal
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/"]
interval: 60s
timeout: 5s
retries: 3
start_period: 30s
deploy:
resources:
limits:
cpus: '0.5'
memory: 256M
reservations:
memory: 64M
networks:
  # `internal: true` detaches this bridge from the host's external routing,
  # so containers on it (db, backup) have no internet ingress or egress.
  internal:
    driver: bridge
    internal: true
  # NOTE: despite the name, this is a normal bridge network DEFINED here —
  # it is not Compose's `external: true` "pre-existing network" flag.
  external:
    driver: bridge
volumes:
  # Postgres data directory — survives container recreation.
  postgres_data:
    driver: local
  # Django collectstatic output; mounted read-only by nginx.
  static_files:
    driver: local
  # Uploaded media files; mounted read-only by nginx.
  media_files:
    driver: local