-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path: deploy.sh
More file actions
443 lines (400 loc) · 16.7 KB
/
deploy.sh
File metadata and controls
443 lines (400 loc) · 16.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
#!/bin/bash
#
# Master Deployment Script for Pinning Service
#
# Handles database migrations, backup, and auxiliary service deploys (x402, AI).
# Main services (webui, ipfs-server, Go pinning-service) are deployed manually.
#
# Migrations are tracked in a state file so each migration runs exactly once.
# All migrations are idempotent (IF NOT EXISTS / IF EXISTS) and safe to re-run
# if the state file is lost.
#
# Usage: sudo bash ./deploy.sh [OPTIONS]
#
# Options:
# --skip-pull Skip git pull (already up to date)
# --migrations-only Only run database migrations, skip x402/AI deploys
# --dry-run Show what would be done without doing it
# -h, --help Show this help message
#
set -e

# --- Script location & deploy paths ----------------------------------------
# Resolve the directory this script lives in and work from there, so the
# relative paths (migrations/, x402-skale/, ai/) behave the same no matter
# where the script is invoked from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
DEPLOY_DIR="/home/root/pinning-service"

# --- Database settings (overridable via environment) ------------------------
DB_USER="${DB_USER:-pinning_user}"
DB_NAME="${DB_NAME:-pinning_service}"
# PostgreSQL runs in a Docker container; override the name if yours differs.
PG_CONTAINER="${PG_CONTAINER:-postgres-pinning}"

# --- ANSI palette for log output (interpreted by echo -e / printf %b) -------
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
BOLD='\033[1m'
NC='\033[0m'

# --- CLI flag defaults -------------------------------------------------------
SKIP_PULL=false
MIGRATIONS_ONLY=false
DRY_RUN=false
# ---------------------------------------------------------------------------
# Parse command-line arguments. Unknown arguments warn (not fail) so the
# script stays forgiving when invoked from older wrappers.
# ---------------------------------------------------------------------------
while [[ $# -gt 0 ]]; do
  case $1 in
    --skip-pull) SKIP_PULL=true; shift ;;
    --migrations-only) MIGRATIONS_ONLY=true; shift ;;
    --dry-run) DRY_RUN=true; shift ;;
    -h|--help)
      # The usage text lives in the header comment (file lines 4-18).
      head -18 "$0" | tail -15
      exit 0
      ;;
    # Consistency fix: use the shared color palette instead of hard-coded
    # escape sequences. (echo directly — log_warn is not defined yet here.)
    *) echo -e "${YELLOW} [WARN]${NC} Unknown argument: $1"; shift ;;
  esac
done
# Colorized logging helpers. printf '%b' expands the backslash escapes stored
# in the palette variables, matching the behavior of `echo -e`.
log_step() { printf '%b\n' "\n${BOLD}${BLUE}[STEP]${NC} ${BOLD}$1${NC}"; }
log_ok()   { printf '%b\n' "${GREEN} [OK]${NC} $1"; }
log_warn() { printf '%b\n' "${YELLOW} [WARN]${NC} $1"; }
log_err()  { printf '%b\n' "${RED} [ERROR]${NC} $1"; }
log_info() { printf '%b\n' "${BLUE} [INFO]${NC} $1"; }

# run_cmd CMD [ARGS...] — execute the command, or merely print it when
# --dry-run is active (always succeeding in that case).
run_cmd() {
    if [ "$DRY_RUN" != true ]; then
        "$@"
    else
        printf '%b\n' "  ${YELLOW}[DRY-RUN]${NC} $*"
        return 0
    fi
}
# --- Startup banner ---------------------------------------------------------
printf '\n'
printf '%b\n' "${BOLD}=========================================="
printf '%s\n' " Pinning Service Deployment"
printf '%b\n' "==========================================${NC}"
printf '\n'
printf '%s\n' " Source: $SCRIPT_DIR"
printf '%s\n' " Target: $DEPLOY_DIR"
if [ "$DRY_RUN" = true ]; then
    printf '%b\n' " Mode: ${YELLOW}DRY RUN${NC}"
fi
printf '\n'
# ============================================
# Step 1: Git Pull
# ============================================
# Refresh the working tree unless the operator said it is already current.
if [ "$SKIP_PULL" = true ]; then
    log_info "Skipping git pull"
else
    log_step "Pulling latest code"
    run_cmd git pull
    log_ok "Code updated"
fi
# ============================================
# Step 2: Database Migrations (BEFORE service restarts)
# ============================================
log_step "Running database migrations"
log_info "Migrations must complete before any service restart"
# Verify Docker container is running
# (anchored grep "^name$" avoids matching containers with similar names)
if ! docker ps --format '{{.Names}}' | grep -q "^${PG_CONTAINER}$"; then
log_err "PostgreSQL container '$PG_CONTAINER' is not running"
log_err "Check: docker ps | grep postgres"
log_err "Override container name with: PG_CONTAINER=mycontainer bash deploy.sh"
exit 1
fi
log_info "Using PostgreSQL container: $PG_CONTAINER"
# Verify database is reachable
# A cheap "SELECT 1" round-trip confirms both credentials and server health
# before we attempt a backup or run any migration.
if ! docker exec "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" &>/dev/null; then
log_err "Cannot connect to database '$DB_NAME' as user '$DB_USER'"
log_err "Check credentials and container health: docker exec $PG_CONTAINER pg_isready"
exit 1
fi
log_ok "Database connection verified"
# ---- Pre-migration backup ----
# Take a lightweight custom-format dump before touching the schema so the
# operator can revert with pg_restore if a migration goes wrong.
BACKUP_DIR="$DEPLOY_DIR/backups"
BACKUP_FILE="$BACKUP_DIR/pre-migration-$(date -u +%Y%m%d_%H%M%S).dump"
if [ "$DRY_RUN" = false ]; then
    mkdir -p "$BACKUP_DIR"
    log_info "Taking pre-migration database backup..."
    # -Fc = custom format (restorable with pg_restore), -Z6 = mid compression.
    # Success requires BOTH a zero exit status and a non-empty dump file.
    if docker exec "$PG_CONTAINER" pg_dump -U "$DB_USER" -d "$DB_NAME" -Fc -Z6 > "$BACKUP_FILE" 2>/dev/null && [ -s "$BACKUP_FILE" ]; then
        BACKUP_SIZE=$(du -h "$BACKUP_FILE" 2>/dev/null | cut -f1)
        log_ok "Backup saved: $BACKUP_FILE ($BACKUP_SIZE)"
        log_info "To restore if needed:"
        log_info " docker exec -i $PG_CONTAINER pg_restore -U $DB_USER -d $DB_NAME --clean --if-exists < $BACKUP_FILE"
    else
        log_err "Pre-migration backup FAILED — aborting deployment"
        log_err "Will not modify database without a backup."
        # Remove the (possibly truncated) dump so a later run cannot
        # mistake it for a good backup.
        rm -f "$BACKUP_FILE"
        exit 1
    fi
    # Retention: keep only the 5 most recent backups to avoid filling disk.
    # FIX: use xargs -r (--no-run-if-empty, GNU) so rm is not invoked with
    # zero operands when there is nothing old enough to prune.
    ls -t "$BACKUP_DIR"/pre-migration-*.dump 2>/dev/null | tail -n +6 | xargs -r rm -f 2>/dev/null || true
else
    log_info "Would take pre-migration backup to $BACKUP_DIR/"
fi
# Where migration SQL files live, and the state file recording which ones
# have already been applied (one line per migration: "<file> <timestamp>").
MIGRATION_DIR="$SCRIPT_DIR/migrations/postgres"
MIGRATION_STATE="$DEPLOY_DIR/.migration_state"
# Ensure the state file exists; fall back to /tmp when the deploy dir is not
# writable. (State in /tmp is lost on reboot, but every migration is
# idempotent, so a re-run is harmless.)
if [ "$DRY_RUN" = false ]; then
    touch "$MIGRATION_STATE" 2>/dev/null || MIGRATION_STATE="/tmp/.pinning_migration_state"
fi

# Ordered list of migrations (dependency order matters).
# Each migration uses IF NOT EXISTS / IF EXISTS / DROP ... IF EXISTS and is
# safe to re-run; the state file only avoids redundant work.
MIGRATION_FILES=(
    "006_encrypted_api_keys.sql"
    "007_cleanup_retry.sql"
    "008_admin_audit_log.sql"
    "009_hash_session_tokens.sql"
    "010_ensure_user_id_columns.sql"
    "011_referral_fk_to_user_id.sql"
    "012_nullable_legacy_columns.sql"
    "013_api_key_hash.sql"
    "014_user_credits_unique_user_id.sql"
    "015_blocked_cids.sql"
    "016_blocked_cids_mode.sql"
)
# Counters for the end-of-step summary.
migrations_applied=0
migrations_skipped=0
# Apply each migration exactly once, in dependency order.
for migration in "${MIGRATION_FILES[@]}"; do
migration_path="$MIGRATION_DIR/$migration"
# Skip if already applied (tracked in state file)
if [ "$DRY_RUN" = false ] && grep -qF "$migration" "$MIGRATION_STATE" 2>/dev/null; then
log_info "$migration — already applied, skipping"
migrations_skipped=$((migrations_skipped + 1))
continue
fi
# A missing file is a warning, not an error: this checkout may simply not
# contain that migration.
if [ ! -f "$migration_path" ]; then
log_warn "$migration not found at $migration_path — skipping"
continue
fi
log_info "Applying $migration..."
# ON_ERROR_STOP=1 makes psql exit non-zero on the first SQL error, so a
# broken migration aborts the deploy instead of half-applying.
# NOTE(review): under --dry-run, run_cmd only echoes (and returns 0), so
# this branch still counts the migration as applied for reporting.
if run_cmd docker exec -i "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 < "$migration_path"; then
log_ok "$migration applied"
migrations_applied=$((migrations_applied + 1))
# Record successful migration
if [ "$DRY_RUN" = false ]; then
echo "$migration $(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$MIGRATION_STATE"
fi
else
# Abort immediately: services have not been touched yet, so the system
# keeps running on the old schema while the operator investigates.
log_err "$migration FAILED — aborting deployment"
echo ""
log_err "Fix the migration issue before restarting services."
log_err "Services have NOT been restarted."
log_err ""
log_err "To debug, run the migration manually:"
log_err " docker exec -i $PG_CONTAINER psql -U $DB_USER -d $DB_NAME < $migration_path"
log_err ""
log_err "After fixing, re-run: bash deploy.sh --skip-pull"
exit 1
fi
done
# Report the migration results.
if [ "$migrations_applied" -eq 0 ]; then
    log_ok "All migrations already up to date ($migrations_skipped skipped)"
else
    log_ok "$migrations_applied migration(s) applied, $migrations_skipped already up to date"
fi

# --migrations-only: stop here; service deploys are left to the operator.
if [ "$MIGRATIONS_ONLY" = true ]; then
    echo ""
    log_ok "Migrations complete. Deploy services manually."
    exit 0
fi
# deploy_aux TITLE SCRIPT-RELPATH DONE-MSG
# Run an auxiliary deploy script if it exists; warn (don't fail) when missing.
deploy_aux() {
    local title="$1" rel="$2" done_msg="$3"
    log_step "$title"
    if [ -f "$SCRIPT_DIR/$rel" ]; then
        run_cmd bash "$SCRIPT_DIR/$rel"
        log_ok "$done_msg"
    else
        log_warn "$rel not found — skipping"
    fi
}
# ============================================
# Step 3: Deploy x402-skale
# ============================================
deploy_aux "Deploying x402-skale" "x402-skale/update.sh" "x402-skale deployed"
# ============================================
# Step 4: Deploy AI service
# ============================================
deploy_aux "Deploying AI service" "ai/install.sh" "AI service deployed"
# ============================================
# Post-Migration Verification
# ============================================
log_step "Post-migration verification"
ALL_OK=true
# Verify critical migration state
if [ "$DRY_RUN" = false ]; then
    log_info "Verifying migration state..."
    # check_column <table> <col> <migration> — assert the column exists in
    # the live schema; on failure name the missing migration and degrade the
    # run via ALL_OK (deploy continues, summary shows warnings).
    check_column() {
        local table="$1" col="$2" migration="$3"
        if docker exec "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -tAc \
            "SELECT column_name FROM information_schema.columns WHERE table_name='$table' AND column_name='$col'" 2>/dev/null | grep -q "$col"; then
            log_ok "$table.$col exists"
        else
            log_err "$table.$col MISSING — $migration not applied"
            ALL_OK=false
        fi
    }
    # check_table <table> <migration> — same idea for whole tables.
    check_table() {
        local table="$1" migration="$2"
        if docker exec "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -tAc \
            "SELECT tablename FROM pg_tables WHERE tablename='$table'" 2>/dev/null | grep -q "$table"; then
            log_ok "$table table exists"
        else
            log_err "$table table MISSING — $migration not applied"
            ALL_OK=false
        fi
    }
    # Migration 006
    check_column "api_keys" "encrypted_key" "migration 006"
    # Migration 008
    check_table "admin_audit_log" "migration 008"
    # Migration 009
    check_column "sessions" "token_hash" "migration 009"
    # Migration 010
    check_column "webui_users" "user_id" "migration 010"
    check_column "webui_users" "encrypted_email" "migration 010"
    # Migration 011 — check that user_id FK target index exists
    if docker exec "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -tAc \
        "SELECT indexname FROM pg_indexes WHERE tablename='webui_users' AND indexname='idx_webui_users_user_id_unique'" 2>/dev/null | grep -q "idx_webui_users_user_id_unique"; then
        log_ok "webui_users user_id unique index exists"
    else
        log_err "webui_users user_id unique index MISSING — migration 011 not applied"
        ALL_OK=false
    fi
    # Migration 012 — verify username is nullable (NOT NULL dropped)
    if docker exec "$PG_CONTAINER" psql -U "$DB_USER" -d "$DB_NAME" -tAc \
        "SELECT is_nullable FROM information_schema.columns WHERE table_name='users' AND column_name='username'" 2>/dev/null | grep -q "YES"; then
        log_ok "users.username is nullable (migration 012 applied)"
    else
        log_warn "users.username is still NOT NULL — migration 012 may not be applied"
        ALL_OK=false
    fi
    # Migration 013
    check_column "api_keys" "key_hash" "migration 013"
    # Migration 015
    check_table "blocked_cids" "migration 015"
    # Migration 016
    check_column "blocked_cids" "mode" "migration 016"
    # ----------------------------------------------------------------------
    # Infrastructure binding audit
    # Postgres or IPFS bound to 0.0.0.0 lets the public internet hit them
    # directly — bypassing nginx and (for Docker) UFW. Past incidents:
    # postgres brute-force attempts against role names from public scanners.
    # We only WARN here; install.sh has the interactive rebind helper.
    # ----------------------------------------------------------------------
    log_info "Auditing infrastructure container bindings..."
    PG_BIND=$(docker inspect "$PG_CONTAINER" --format '{{range $p, $conf := .NetworkSettings.Ports}}{{if $conf}}{{(index $conf 0).HostIp}}{{end}}{{end}}' 2>/dev/null || echo "")
    case "$PG_BIND" in
        0.0.0.0)
            log_err "$PG_CONTAINER bound to 0.0.0.0:5432 — PUBLIC EXPOSURE"
            log_err " Fix: re-run install.sh (interactive rebind), or rebind manually:"
            log_err " docker stop $PG_CONTAINER && docker rm $PG_CONTAINER"
            log_err " # then docker run with -p 127.0.0.1:5432:5432"
            ALL_OK=false
            ;;
        127.0.0.1|::1)
            log_ok "$PG_CONTAINER bound to localhost ($PG_BIND:5432)"
            ;;
        "")
            log_ok "$PG_CONTAINER has no published port (safe)"
            ;;
        *)
            log_warn "$PG_CONTAINER bound to $PG_BIND — verify intentional"
            ;;
    esac
    IPFS_CONTAINER_NAME="${IPFS_CONTAINER:-ipfs_host}"
    if docker inspect "$IPFS_CONTAINER_NAME" >/dev/null 2>&1; then
        IPFS_API=$(docker exec "$IPFS_CONTAINER_NAME" ipfs config Addresses.API 2>/dev/null || echo "")
        case "$IPFS_API" in
            */ip4/0.0.0.0/*)
                log_err "$IPFS_CONTAINER_NAME API bound to 0.0.0.0:5001 — PUBLIC EXPOSURE"
                log_err " Fix: docker exec $IPFS_CONTAINER_NAME ipfs config Addresses.API /ip4/127.0.0.1/tcp/5001"
                log_err " docker restart $IPFS_CONTAINER_NAME"
                ALL_OK=false
                ;;
            */ip4/127.0.0.1/*|*/ip6/::1/*)
                log_ok "$IPFS_CONTAINER_NAME API bound to localhost"
                ;;
            "")
                log_warn "$IPFS_CONTAINER_NAME exists but Addresses.API could not be read"
                ;;
            *)
                log_warn "$IPFS_CONTAINER_NAME API binding: $IPFS_API"
                ;;
        esac
    fi
    # Check ENCRYPTION_KEY is set (required for new user encrypted_email)
    ENC_KEY=""
    if [ -f "$DEPLOY_DIR/pinning-webui/.env" ]; then
        ENC_KEY=$(grep -oP '^ENCRYPTION_KEY=\K.+' "$DEPLOY_DIR/pinning-webui/.env" 2>/dev/null || true)
    fi
    if [ -n "$ENC_KEY" ]; then
        # SECURITY FIX: never print key material — not even a truncated
        # prefix/suffix — since deploy logs are often shipped off-box.
        # Also removed the interactive "press Enter" pause: with `set -e`,
        # `read` failing on a closed stdin (cron/CI) aborted the whole
        # deploy at this point, after migrations had already run.
        log_ok "ENCRYPTION_KEY is configured"
    else
        log_warn "ENCRYPTION_KEY not set in $DEPLOY_DIR/pinning-webui/.env"
        log_warn "New users will NOT have encrypted_email stored. Generate with: openssl rand -hex 32"
        # Not fatal — system works without it, just no email recovery for new users
    fi
fi
# ============================================
# Summary
# ============================================
printf '\n'
printf '%b\n' "${BOLD}==========================================${NC}"
if [ "$ALL_OK" = true ] || [ "$DRY_RUN" = true ]; then
    printf '%b\n' "${BOLD}${GREEN} Migrations & Auxiliary Deploys Complete${NC}"
else
    printf '%b\n' "${BOLD}${YELLOW} Completed (with warnings)${NC}"
fi
printf '%b\n' "${BOLD}==========================================${NC}"
printf '\n'
# On verification failure, remind the operator how to roll the DB back.
if [ "$ALL_OK" = false ] && [ -n "${BACKUP_FILE:-}" ] && [ -f "${BACKUP_FILE:-}" ]; then
    echo "To revert database changes, restore the pre-migration backup:"
    echo " docker exec -i $PG_CONTAINER pg_restore -U $DB_USER -d $DB_NAME --clean --if-exists < $BACKUP_FILE"
    echo ""
fi
# Runbook: the three main services are deployed by hand. Print the exact
# commands so the operator can copy/paste them.
echo -e "${BOLD}Next: deploy services manually${NC}"
echo ""
echo " ## pinning-webui (port 3000)"
echo " cd ~/pinning-service/pinning-webui && git pull"
echo " npm install"
echo " VITE_GOOGLE_CLIENT_ID=<id> VITE_WALLETCONNECT_PROJECT_ID=<id> VITE_APPLE_CLIENT_ID=land.fx.cloud npm run build"
echo " cp -r dist/* /home/root/pinning-service/pinning-webui/dist/"
echo " cp package.json package-lock.json /home/root/pinning-service/pinning-webui/"
echo " cd /home/root/pinning-service/pinning-webui"
echo " npm install --production --ignore-scripts=false"
echo " npm audit fix"
echo " npm rebuild"
echo " systemctl restart fula-pinning-webui"
echo ""
# The Go binary is built under a temp name and swapped while the service is
# stopped, avoiding "text file busy" on the running executable.
echo " ## Go pinning-service (port 6000)"
echo " cd ~/pinning-service && git pull"
echo " go mod download"
echo " go build -o /home/root/pinning-service/ipfs-pinning.new main_postgres.go"
echo " systemctl stop fula-pinning-service"
echo " mv /home/root/pinning-service/ipfs-pinning.new /home/root/pinning-service/ipfs-pinning"
echo " systemctl start fula-pinning-service"
echo ""
echo " ## ipfs-server (upload gateway)"
echo " cd ~/pinning-service/ipfs-server && git pull"
echo " npm install --production=false && npm audit fix && npm run build"
echo " systemctl stop fula-upload-server"
echo " cp -r dist/* /home/root/pinning-service/ipfs-server/dist/"
echo " cp package.json package-lock.json /home/root/pinning-service/ipfs-server/ 2>/dev/null || true"
echo " cd /home/root/pinning-service/ipfs-server && npm install --production --ignore-scripts=false"
echo " systemctl start fula-upload-server"
echo ""
# Post-deployment hints
# Only nag about ENCRYPTION_KEY when it is actually absent from the .env.
if [ -z "$(grep -oP '^ENCRYPTION_KEY=\K.+' "$DEPLOY_DIR/pinning-webui/.env" 2>/dev/null || true)" ]; then
echo -e "${YELLOW} Set ENCRYPTION_KEY (required for encrypted email storage):${NC}"
echo " openssl rand -hex 32"
echo " Add ENCRYPTION_KEY=<hex> to $DEPLOY_DIR/pinning-webui/.env"
echo " systemctl restart fula-pinning-webui"
echo ""
fi
# One-time backup setup reminder (IPNS-published, encrypted DB dumps).
echo " Backups (one-time setup):"
echo " 1. Generate IPNS key: docker exec ipfs_host ipfs key gen fula-db-backup"
echo " 2. Set BACKUP_ENCRYPTION_KEY: openssl rand -hex 32"
echo " 3. Add cron job: 0 3 * * * BACKUP_ENCRYPTION_KEY=<hex> $SCRIPT_DIR/scripts/backup-db.sh >> /var/log/fula-db-backup.log 2>&1"
echo ""