-
Notifications
You must be signed in to change notification settings - Fork 623
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
148 lines (138 loc) · 3.6 KB
/
docker-compose.yml
File metadata and controls
148 lines (138 loc) · 3.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
---
# Task Backend Worker - Docker Compose for development and testing.
#
# Profiles:
#   (default)    redis + worker-celery
#   hatchet      worker-hatchet (requires external Hatchet server + HATCHET_TOKEN)
#   temporal     temporal + postgres + worker-temporal
#   monitoring   prometheus + grafana

# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification (Compose v2 ignores it with a warning); kept only for
# compatibility with legacy docker-compose v1.
version: '3.8'

services:
  # Redis for Celery backend
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    command: redis-server --appendonly yes
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3

  # Task worker with Celery backend
  worker-celery:
    build: .
    environment:
      - TASK_BACKEND_TYPE=celery
      - TASK_CELERY_BROKER_URL=redis://redis:6379/0
      - TASK_CELERY_RESULT_BACKEND=redis://redis:6379/0
      - TASK_WORKER_NAME=celery-worker-1
      - TASK_WORKER_CONCURRENCY=4
      - TASK_LOG_LEVEL=INFO
    depends_on:
      redis:
        condition: service_healthy
    volumes:
      - ./logs:/app/logs
    restart: unless-stopped

  # Task worker with Hatchet backend (requires external Hatchet server)
  worker-hatchet:
    build: .
    environment:
      - TASK_BACKEND_TYPE=hatchet
      - TASK_HATCHET_TOKEN=${HATCHET_TOKEN}
      - TASK_HATCHET_SERVER_URL=${HATCHET_SERVER_URL:-https://app.hatchet.run}
      - TASK_WORKER_NAME=hatchet-worker-1
      - TASK_LOG_LEVEL=INFO
    volumes:
      - ./logs:/app/logs
    restart: unless-stopped
    profiles:
      - hatchet

  # Temporal server for development
  temporal:
    image: temporalio/auto-setup:1.22
    ports:
      - "7233:7233"
      - "8080:8080"  # Web UI
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=temporal
      - POSTGRES_PWD=temporal
      - POSTGRES_SEEDS=postgres
    depends_on:
      postgres:
        condition: service_healthy
    volumes:
      - temporal_data:/etc/temporal
    profiles:
      - temporal

  # PostgreSQL for Temporal
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=temporal
      - POSTGRES_PASSWORD=temporal
      - POSTGRES_DB=temporal
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U temporal"]
      interval: 10s
      timeout: 5s
      retries: 5
    profiles:
      - temporal

  # Task worker with Temporal backend
  worker-temporal:
    build: .
    environment:
      - TASK_BACKEND_TYPE=temporal
      - TASK_TEMPORAL_HOST=temporal
      - TASK_TEMPORAL_PORT=7233
      - TASK_TEMPORAL_NAMESPACE=default
      - TASK_TEMPORAL_TASK_QUEUE=task-queue
      - TASK_WORKER_NAME=temporal-worker-1
      - TASK_LOG_LEVEL=INFO
    depends_on:
      temporal:
        # NOTE(review): the auto-setup image defines no healthcheck, so only
        # container start can be awaited here; the worker is expected to
        # retry its own connection to Temporal — confirm against the worker
        # implementation.
        condition: service_started
    volumes:
      - ./logs:/app/logs
    restart: unless-stopped
    profiles:
      - temporal

  # Monitoring and observability
  prometheus:
    # Pinned tag instead of ':latest' for reproducible environments.
    image: prom/prometheus:v2.53.0
    ports:
      - "9090:9090"
    volumes:
      # Config is mounted read-only — the container never writes it.
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
    profiles:
      - monitoring

  grafana:
    # Pinned tag instead of ':latest' for reproducible environments.
    image: grafana/grafana:10.4.5
    ports:
      - "3000:3000"
    environment:
      # Overridable via GRAFANA_ADMIN_PASSWORD; defaults to the original
      # dev-only value. Do not ship the default to any shared environment.
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
    volumes:
      - grafana_data:/var/lib/grafana
      # Provisioning files are read-only inputs to Grafana.
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
    profiles:
      - monitoring

volumes:
  redis_data:
  postgres_data:
  temporal_data:
  prometheus_data:
  grafana_data:

networks:
  default:
    name: task-backend-network