#!/usr/bin/env bash
set -euo pipefail
# ---------------------------------------------------------------------------
# deploy.sh — deploy Genie Workbench app and configure permissions.
#
# Three modes:
# Full deploy (default):
# 1. Pre-flight checks
# 2. Build frontend
# 3. Create app (if not exists)
# 4. Full-sync files to workspace
# 5. Resolve app SP + Grant UC permissions (+ enable CDF on GSO tables)
# 6. Deploy optimization job via bundle (databricks bundle deploy -t app)
# 7. Redeploy app (apps deploy --source-code-path)
# 8. Verify deployment
#
# Update mode (--update):
# 1. Pre-flight checks
# 2. Build frontend
# 3. Sync files to workspace
# 4. Resolve app SP + Grant UC permissions (+ enable CDF on GSO tables)
# 5. Deploy optimization job via bundle
# 6. Redeploy app (apps deploy --source-code-path)
# 7. Verify deployment
# Skips app creation.
# Use for code-only changes when the app already exists.
#
# Destroy mode (--destroy):
# 1. Clean up runtime-created jobs
# 2. Destroy bundle-managed optimization job
# 3. Delete the app
#
# Usage:
# export GENIE_WAREHOUSE_ID=<your-warehouse-id> # required
# export GENIE_CATALOG=my_catalog # required
# ./scripts/deploy.sh # full deploy
# ./scripts/deploy.sh --update # code-only update
# ./scripts/deploy.sh --destroy # destroy everything
# ./scripts/deploy.sh --destroy --auto-approve # destroy without confirmation
#
# Or use a .env.deploy file (see deploy-config.sh for all options).
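#
# Illustrative .env.deploy (a sketch: only the two required variables shown
# above are documented here; deploy-config.sh is the authoritative list of
# supported options):
#
#   GENIE_WAREHOUSE_ID=1234567890abcdef
#   GENIE_CATALOG=my_catalog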
# ---------------------------------------------------------------------------
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# ── Parse flags ────────────────────────────────────────────────────────────
UPDATE_ONLY=false
DESTROY_MODE=false
AUTO_APPROVE=false
for arg in "$@"; do
case "$arg" in
--update) UPDATE_ONLY=true ;;
--destroy) DESTROY_MODE=true ;;
--auto-approve) AUTO_APPROVE=true ;;
esac
done
# shellcheck source=deploy-config.sh
source "$SCRIPT_DIR/deploy-config.sh"
# shellcheck source=preflight.sh
source "$SCRIPT_DIR/preflight.sh"
# ═══════════════════════════════════════════════════════════════════════════
# DESTROY MODE
# ═══════════════════════════════════════════════════════════════════════════
if [ "$DESTROY_MODE" = "true" ]; then
echo "╔══════════════════════════════════════════════════════════════╗"
echo "║ Genie Workbench — Destroy ║"
echo "╚══════════════════════════════════════════════════════════════╝"
_print_config
if [ "$AUTO_APPROVE" != "true" ]; then
echo ""
echo " This will permanently delete the app, optimization job, and all bundle state."
echo -n " Continue? [y/N]: "
read -r confirm
if [[ ! "$confirm" =~ ^[Yy] ]]; then
echo " Cancelled."
exit 0
fi
fi
# ── Step 1: Clean up runtime-created jobs ──────────────────────────
echo ""
echo "▸ Step 1/3: Cleaning up runtime-created jobs..."
RUNTIME_JOBS=$(
databricks jobs list --profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "
import sys, json
jobs = json.load(sys.stdin)
for j in (jobs if isinstance(jobs, list) else jobs.get('jobs', [])):
    tags = (j.get('settings') or {}).get('tags', {})
    if tags.get('app') == '$APP_NAME' and (
        tags.get('pattern') == 'deployment-job' or
        tags.get('managed-by') == 'backend-job-launcher'
    ):
        print(j['job_id'])
" 2>/dev/null || true
)
if [ -z "$RUNTIME_JOBS" ]; then
echo " No runtime-created jobs found."
else
DELETED=0
for JID in $RUNTIME_JOBS; do
echo " Deleting runtime job $JID..."
if databricks jobs delete "$JID" --profile "$PROFILE" 2>/dev/null; then
DELETED=$((DELETED + 1))
else
echo " ⚠ Could not delete job $JID (may already be deleted)"
fi
done
echo " ✓ Cleaned up $DELETED runtime job(s)"
fi
# ── Step 2: Destroy bundle-managed optimization job ───────────────
echo ""
echo "▸ Step 2/3: Destroying bundle-managed optimization job..."
if (cd "$PROJECT_DIR" && databricks bundle destroy -t app \
--var="catalog=${CATALOG}" \
--var="warehouse_id=${WAREHOUSE_ID:-placeholder}" \
--profile "$PROFILE" --auto-approve 2>&1 | sed 's/^/ /'); then
echo " ✓ Bundle resources destroyed"
else
echo " ⚠ Bundle destroy failed or no bundle state found (OK on first deploy)"
fi
# ── Step 3: Delete the app ───────────────────────────────────────
echo ""
echo "▸ Step 3/3: Deleting app '$APP_NAME'..."
if databricks apps get "$APP_NAME" --profile "$PROFILE" &>/dev/null; then
if databricks apps delete "$APP_NAME" --profile "$PROFILE" 2>/dev/null; then
echo " ✓ App '$APP_NAME' deleted"
else
echo " ✗ Could not delete app '$APP_NAME'."
echo " Try deleting manually via the Databricks Apps UI."
exit 1
fi
else
echo " App '$APP_NAME' does not exist — nothing to delete."
fi
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " Destroy complete."
echo "═══════════════════════════════════════════════════════════════"
exit 0
fi
# ═══════════════════════════════════════════════════════════════════════════
# DEPLOY / UPDATE MODE
# ═══════════════════════════════════════════════════════════════════════════
if [ "$UPDATE_ONLY" = "true" ]; then
TOTAL_STEPS=7
DEPLOY_LABEL="Code Update"
else
TOTAL_STEPS=8
DEPLOY_LABEL="Full Deploy"
fi
echo "╔══════════════════════════════════════════════════════════════╗"
echo "║ Genie Workbench — $DEPLOY_LABEL$(printf '%*s' $((37 - ${#DEPLOY_LABEL})) '')║"
echo "╚══════════════════════════════════════════════════════════════╝"
_print_config
# ── Step 1: Pre-flight checks ─────────────────────────────────────────
echo ""
echo "▸ Step 1/$TOTAL_STEPS: Pre-flight checks..."
_preflight_check_tools
_preflight_check_venv
_preflight_check_npm_registry
_preflight_check_profile "$PROFILE"
# Resolve deployer email (needed for workspace paths)
DEPLOYER=$(databricks current-user me --profile "$PROFILE" -o json \
| python3 -c "import sys,json; print(json.load(sys.stdin)['userName'])")
WS_PATH="/Workspace/Users/$DEPLOYER/$APP_NAME"
_preflight_check_warehouse "$WAREHOUSE_ID" "$PROFILE"
_preflight_check_catalog "$CATALOG" "$PROFILE"
_preflight_check_app_state "$APP_NAME" "$PROFILE"
echo " ✓ All pre-flight checks passed"
# ── Step 2: Build frontend ─────────────────────────────────────────────
STEP=2
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Building frontend..."
if ! (cd "$PROJECT_DIR/frontend" && npm ci --silent && npm run build --silent); then
echo " ✗ Frontend build failed (npm returned non-zero exit code)."
exit 1
fi
if [ ! -f "$PROJECT_DIR/frontend/dist/index.html" ]; then
echo " ✗ Frontend build failed — frontend/dist/index.html not found."
exit 1
fi
echo " ✓ Frontend built"
if [ "$UPDATE_ONLY" = "true" ]; then
# ── Step 3 (update): Sync files to workspace ──────────────────────
STEP=3
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Syncing files to workspace..."
# Clean sync: delete workspace dir first so deleted local files don't linger.
# databricks sync --full only uploads — it never removes stale remote files.
echo " Cleaning stale workspace files..."
databricks workspace delete "$WS_PATH" --profile "$PROFILE" --recursive 2>/dev/null || true
databricks sync "$PROJECT_DIR" "$WS_PATH" --profile "$PROFILE" --full \
--exclude-from "$PROJECT_DIR/.databricksignore"
# frontend/dist/ is gitignored so databricks sync skips it — upload explicitly
echo " Uploading frontend build artifacts..."
databricks workspace import-dir "$PROJECT_DIR/frontend/dist" \
"$WS_PATH/frontend/dist" --profile "$PROFILE" --overwrite
echo " ✓ Files synced to $WS_PATH"
else
# ── Step 3 (full): Create app if not exists ──────────────────────
STEP=3
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Creating app (if not exists)..."
if databricks apps get "$APP_NAME" --profile "$PROFILE" &>/dev/null; then
echo " ✓ App '$APP_NAME' already exists"
else
echo " Creating app '$APP_NAME'..."
APP_CREATE_JSON=$(python3 -c "import json; print(json.dumps({'name': '$APP_NAME', 'description': 'Genie Workbench - Create, score, and optimize Genie Spaces'}))")
if databricks apps create --json "$APP_CREATE_JSON" --profile "$PROFILE" --no-wait 2>/dev/null; then
echo " ✓ App created (compute starting in background)"
else
echo " ✗ Could not create app '$APP_NAME'."
echo ""
echo " Remediation:"
echo " 1. Check if the app name is available"
echo " 2. Ensure you have permission to create apps"
echo " 3. Try creating manually in the Databricks Apps UI"
exit 1
fi
fi
# ── Step 4 (full): Full-sync files to workspace ───────────────────
STEP=4
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Syncing files to workspace..."
# Clean sync: delete workspace dir first so deleted local files don't linger.
# databricks sync --full only uploads — it never removes stale remote files.
echo " Cleaning stale workspace files..."
databricks workspace delete "$WS_PATH" --profile "$PROFILE" --recursive 2>/dev/null || true
databricks sync "$PROJECT_DIR" "$WS_PATH" --profile "$PROFILE" --full \
--exclude-from "$PROJECT_DIR/.databricksignore"
# frontend/dist/ is gitignored so databricks sync skips it — upload explicitly
echo " Uploading frontend build artifacts..."
databricks workspace import-dir "$PROJECT_DIR/frontend/dist" \
"$WS_PATH/frontend/dist" --profile "$PROFILE" --overwrite
echo " ✓ Full sync complete"
fi
# ── Resolve app SP + Grant UC permissions ────────────────────────────────
STEP=$((STEP + 1))
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Resolving app SP and granting UC permissions..."
SP_CLIENT_ID=$(
databricks apps get "$APP_NAME" --profile "$PROFILE" -o json \
| python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('service_principal_client_id','') or d.get('service_principal_name',''))"
)
if [ -z "$SP_CLIENT_ID" ]; then
echo " ✗ Could not resolve SP for app '$APP_NAME'. Is the app created?"
exit 1
fi
echo " ✓ SP client ID: $SP_CLIENT_ID"
python3 "$SCRIPT_DIR/grant_permissions.py" \
--profile "$PROFILE" \
--app-name "$APP_NAME" \
--catalog "$CATALOG" \
--schema "$GSO_SCHEMA" \
--warehouse-id "$WAREHOUSE_ID"
echo " ✓ UC grants applied"
# ── Deploy optimization job via bundle ────────────────────────────────────
STEP=$((STEP + 1))
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Deploying optimization job via bundle..."
# Pre-validate: SP must be resolved (needed for post-deploy job permissions)
if [ -z "$SP_CLIENT_ID" ]; then
echo " ✗ Cannot deploy optimization job: SP client ID is empty."
echo ""
echo " Remediation: ensure the app '$APP_NAME' exists and has a service principal."
exit 1
fi
# databricks bundle deploy -t app:
# - Builds the GSO wheel (artifacts block)
# - Syncs job notebooks to workspace
# - Creates/updates the optimization job (Terraform-managed)
# run_as is NOT set in the bundle — the app self-heals it at startup
# via _ensure_gso_job_run_as() in backend/main.py (avoids needing
# servicePrincipal.user role on the deployer).
# The "app" target uses mode: development (per-deployer Terraform state)
# with presets.name_prefix: "" (clean job names, no [dev] prefix).
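# For reference, that target is assumed to be declared in databricks.yml
# roughly like this (illustrative sketch; the bundle file is authoritative):
#
#   targets:
#     app:
#       mode: development
#       presets:
#         name_prefix: ""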
# Force full file sync on every deploy. The bundle CLI uses local snapshot
# files to do incremental uploads; if the snapshot drifts from the workspace
# (e.g. interrupted upload, workspace cleanup) notebooks silently go missing
# and the job fails at runtime. Deleting the snapshots is cheap — the wheel
# upload dominates deploy time, not the ~350 small file uploads.
rm -f "$PROJECT_DIR/.databricks/bundle/app/sync-snapshots/"*.json 2>/dev/null || true
set +e
BUNDLE_OUTPUT=$(cd "$PROJECT_DIR" && databricks bundle deploy -t app \
--var="catalog=$CATALOG" \
--var="warehouse_id=$WAREHOUSE_ID" \
--profile "$PROFILE" 2>&1)
BUNDLE_EXIT=$?
set -e
echo "$BUNDLE_OUTPUT" | sed 's/^/ /'
if [ "$BUNDLE_EXIT" -ne 0 ]; then
echo ""
echo " ✗ Bundle deploy failed (exit code $BUNDLE_EXIT)."
echo ""
echo " Remediation:"
echo " 1. Check the error output above"
echo " 2. Common causes:"
echo " - Databricks CLI too old (need >= 0.239.0)"
echo " - Auth issue with profile '$PROFILE'"
echo " - GSO wheel build failure (missing 'build' package)"
echo " - Terraform state conflict (try: databricks bundle deploy -t app --force-lock)"
echo " 3. Fix the issue and re-run: ./scripts/deploy.sh --update"
exit 1
fi
# Verify critical job notebooks actually landed on the workspace.
# The bundle's incremental sync can silently skip files if the local snapshot
# diverges from workspace reality. This catches that failure at deploy time
# rather than at job runtime.
_PREFLIGHT_NB="/Workspace/Users/$DEPLOYER/.bundle/genie-workbench/app/files/packages/genie-space-optimizer/src/genie_space_optimizer/jobs/run_preflight"
if ! databricks workspace get-status "$_PREFLIGHT_NB" --profile "$PROFILE" -o json >/dev/null 2>&1; then
echo ""
echo " ✗ FATAL: Bundle file sync failed — job notebook not found at:"
echo " $_PREFLIGHT_NB"
echo ""
echo " Remediation:"
echo " 1. Delete stale sync snapshots:"
echo " rm -f .databricks/bundle/app/sync-snapshots/*.json"
echo " 2. Re-run: ./scripts/deploy.sh --update"
exit 1
fi
echo " ✓ Job notebooks verified on workspace"
JOB_ID=$(cd "$PROJECT_DIR" && databricks bundle summary -t app \
--var="catalog=$CATALOG" \
--var="warehouse_id=$WAREHOUSE_ID" \
--profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "
import sys, json
s = json.load(sys.stdin)
print(s['resources']['jobs']['gso-optimization-runner']['id'])
" 2>/dev/null) || true
if [ -z "$JOB_ID" ]; then
echo " ✗ Bundle deployed but could not resolve job ID from Terraform state."
echo ""
echo " Remediation:"
echo " 1. Run: databricks bundle summary -t app --profile $PROFILE -o json"
echo " 2. Check if resources.jobs.gso-optimization-runner.id exists"
echo " 3. Re-run: ./scripts/deploy.sh --update"
exit 1
fi
echo " ✓ Optimization job deployed: $JOB_ID"
# Grant job permissions (bundle manages run_as; API call sets ownership + SP access)
PERM_PAYLOAD=$(python3 -c "
import json
acl = [
    {'user_name': '$DEPLOYER', 'permission_level': 'IS_OWNER'},
    {'group_name': 'users', 'permission_level': 'CAN_VIEW'},
    {'service_principal_name': '$SP_CLIENT_ID', 'permission_level': 'CAN_MANAGE'},
]
print(json.dumps({'access_control_list': acl}))
")
if databricks api put "/api/2.0/permissions/jobs/$JOB_ID" --profile "$PROFILE" --json "$PERM_PAYLOAD" 2>/dev/null; then
echo " ✓ Job permissions updated (owner=$DEPLOYER, SP=CAN_MANAGE, users=CAN_VIEW)"
else
echo " ⚠ Could not set job permissions — SP may not be able to trigger optimization runs."
fi
# Grant SP access to bundle workspace notebooks so the job can read them.
# Bundle deploys notebooks under the deployer's .bundle/ directory, which is
# private by default. The SP needs CAN_MANAGE to run notebooks from there.
WS_BUNDLE_ROOT="/Workspace/Users/$DEPLOYER/.bundle/genie-workbench/app"
BUNDLE_DIR_OBJ_ID=$(
databricks workspace get-status "$WS_BUNDLE_ROOT" --profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "import sys,json; print(json.load(sys.stdin)['object_id'])" 2>/dev/null
) || true
if [ -n "$BUNDLE_DIR_OBJ_ID" ]; then
if databricks api patch "/api/2.0/permissions/directories/$BUNDLE_DIR_OBJ_ID" \
--profile "$PROFILE" \
--json "{\"access_control_list\": [{\"service_principal_name\": \"$SP_CLIENT_ID\", \"permission_level\": \"CAN_MANAGE\"}]}" 2>/dev/null; then
echo " ✓ SP granted CAN_MANAGE on bundle workspace directory"
else
echo " ⚠ Could not grant SP access to bundle notebooks — job may fail to read notebooks"
fi
else
echo " ⚠ Could not resolve bundle workspace directory — SP may lack notebook access"
fi
# Clean up legacy jobs created by the old ensure_gso_job.py script.
# These have name "genie-space-optimizer-job" and tag "persistent-dag"
# but are NOT the bundle-managed job (different ID).
LEGACY_JOBS=$(databricks jobs list --profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "
import sys, json
bundle_id = '$JOB_ID'
jobs = json.load(sys.stdin)
for j in (jobs if isinstance(jobs, list) else jobs.get('jobs', [])):
    tags = (j.get('settings') or {}).get('tags', {})
    name = (j.get('settings') or {}).get('name', '')
    jid = str(j.get('job_id', ''))
    if (tags.get('pattern') == 'persistent-dag'
            and tags.get('app') in ('genie-workbench', 'genie-space-optimizer')
            and jid != bundle_id):
        print(jid)
" 2>/dev/null || true)
for OLD_JID in $LEGACY_JOBS; do
echo " ℹ Found legacy optimization job $OLD_JID — deleting..."
databricks jobs delete "$OLD_JID" --profile "$PROFILE" 2>/dev/null && \
echo " ✓ Legacy job $OLD_JID deleted" || \
echo " ⚠ Could not delete legacy job $OLD_JID — delete it manually"
done
# ── Redeploy app (ensures freshest code) ─────────────────────────────────
STEP=$((STEP + 1))
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Redeploying app with freshest code..."
# Patch app.yaml on workspace with real GSO values before apps deploy.
# apps deploy reads app.yaml and uses it as the complete env config,
# overwriting whatever was previously set. So we must inject the real values.
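# For reference, the placeholder entries in app.yaml are assumed to look
# roughly like this (illustrative sketch; the env var names here are made
# up, and only the __PLACEHOLDER__ tokens are taken from the sed calls below):
#
#   env:
#     - name: "GSO_WAREHOUSE_ID"
#       value: "__WAREHOUSE_ID__"
#     - name: "GSO_CATALOG"
#       value: "__GSO_CATALOG__"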
echo " Patching app.yaml on workspace with GSO config..."
PATCHED_APP_YAML="/tmp/app.yaml.patched"
cp "$PROJECT_DIR/app.yaml" "$PATCHED_APP_YAML"
sed -i.bak "s|__WAREHOUSE_ID__|$WAREHOUSE_ID|" "$PATCHED_APP_YAML"
sed -i.bak "s|__GSO_CATALOG__|$CATALOG|" "$PATCHED_APP_YAML"
sed -i.bak "s|__LAKEBASE_INSTANCE__|$LAKEBASE_INSTANCE|" "$PATCHED_APP_YAML"
sed -i.bak "s|__LLM_MODEL__|$LLM_MODEL|" "$PATCHED_APP_YAML"
sed -i.bak "s|__MLFLOW_EXPERIMENT_ID__|$MLFLOW_EXPERIMENT_ID|" "$PATCHED_APP_YAML"
if [ -n "$JOB_ID" ]; then
sed -i.bak "s|__GSO_JOB_ID__|$JOB_ID|" "$PATCHED_APP_YAML"
fi
rm -f "${PATCHED_APP_YAML}.bak"
# Validate all placeholders were resolved (count occurrences, not just lines)
UNRESOLVED=$(grep -o '__[A-Z_]*__' "$PATCHED_APP_YAML" | wc -l | tr -d '[:space:]' || true)
if [ "${UNRESOLVED:-0}" -gt 0 ]; then
echo " ⚠ app.yaml has $UNRESOLVED unresolved placeholder(s):"
grep '__[A-Z_]*__' "$PATCHED_APP_YAML" | sed 's/^/ /'
fi
databricks workspace import "$WS_PATH/app.yaml" \
--profile "$PROFILE" --file "$PATCHED_APP_YAML" --format AUTO --overwrite 2>/dev/null && \
echo " ✓ app.yaml patched (WAREHOUSE=$WAREHOUSE_ID, GSO_CATALOG=$CATALOG, GSO_JOB_ID=${JOB_ID:-<none>}, LAKEBASE_INSTANCE=$LAKEBASE_INSTANCE, LLM_MODEL=$LLM_MODEL, MLFLOW=${MLFLOW_EXPERIMENT_ID:-<disabled>})" || \
echo " ⚠ Could not patch app.yaml — config may not be set"
# Sync _metadata.py — required at runtime for the genie_space_optimizer
# package to import. Previously gitignored (generated by apx build), now
# checked in as a static stub. Fail hard if missing or upload fails so
# the optimization trigger doesn't crash with "No module named _metadata".
METADATA_SRC="$PROJECT_DIR/packages/genie-space-optimizer/src/genie_space_optimizer/_metadata.py"
METADATA_DST="$WS_PATH/packages/genie-space-optimizer/src/genie_space_optimizer/_metadata.py"
if [ ! -f "$METADATA_SRC" ]; then
echo " ✗ FATAL: _metadata.py not found at:"
echo " $METADATA_SRC"
echo ""
echo " This file is required at runtime. It should exist in the repo."
echo " If missing after a fresh clone, recreate it:"
echo ""
echo " cat > packages/genie-space-optimizer/src/genie_space_optimizer/_metadata.py << 'PYEOF'"
echo " from pathlib import Path"
echo " app_name: str = \"genie-space-optimizer\""
echo " app_slug: str = \"genie_space_optimizer\""
echo " api_prefix: str = \"/api/genie\""
echo " dist_dir: Path = Path(__file__).resolve().parent / \"__dist__\""
echo " PYEOF"
exit 1
fi
databricks workspace import "$METADATA_DST" \
--profile "$PROFILE" --file "$METADATA_SRC" --format AUTO --overwrite || {
echo " ✗ FATAL: Could not upload _metadata.py to workspace."
echo " The optimization trigger will fail without this file."
exit 1
}
echo " ✓ _metadata.py synced"
# Ensure app compute is running before deploying
APP_STATE=$(databricks apps get "$APP_NAME" --profile "$PROFILE" -o json \
| python3 -c "import sys,json; print(json.load(sys.stdin).get('compute_status',{}).get('state','UNKNOWN'))")
if [ "$APP_STATE" != "ACTIVE" ]; then
echo " ℹ App compute is $APP_STATE — starting..."
databricks apps start "$APP_NAME" --profile "$PROFILE" --no-wait 2>/dev/null || true
echo " Waiting for app compute to reach ACTIVE state..."
for i in $(seq 1 30); do
sleep 10
APP_STATE=$(databricks apps get "$APP_NAME" --profile "$PROFILE" -o json \
| python3 -c "import sys,json; print(json.load(sys.stdin).get('compute_status',{}).get('state','UNKNOWN'))")
if [ "$APP_STATE" = "ACTIVE" ]; then
echo " ✓ App compute is ACTIVE"
break
fi
echo " ... $APP_STATE (attempt $i/30)"
done
if [ "$APP_STATE" != "ACTIVE" ]; then
echo " ⚠ App compute did not reach ACTIVE state after 5 minutes."
echo " Proceeding with deploy anyway — it may start on deployment."
fi
else
echo " ✓ App compute is already ACTIVE"
fi
# ── Set up Lakebase Autoscaling (if configured) ──────────────────────────
if [ -n "$LAKEBASE_INSTANCE" ] && [ -n "$SP_CLIENT_ID" ]; then
echo " Setting up Lakebase Autoscaling..."
uv run python "$SCRIPT_DIR/setup_lakebase.py" \
--profile "$PROFILE" \
--project-name "$LAKEBASE_INSTANCE" \
--sp-client-id "$SP_CLIENT_ID" 2>&1 || \
echo " ⚠ Lakebase setup had errors — app will fall back to in-memory storage"
fi
# ── Resolve Lakebase database ID (needed for postgres resource) ───────────
LAKEBASE_DB_RESOURCE=""
if [ -n "$LAKEBASE_INSTANCE" ]; then
LAKEBASE_DB_RESOURCE=$(databricks api get "/api/2.0/postgres/projects/$LAKEBASE_INSTANCE/branches/production/databases" \
--profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "
import sys, json
try:
    data = json.load(sys.stdin)
    dbs = data.get('databases', [])
    if dbs:
        print(dbs[0]['name'])
except Exception:
    pass
" 2>/dev/null || true)
if [ -n "$LAKEBASE_DB_RESOURCE" ]; then
echo " ✓ Lakebase database: $LAKEBASE_DB_RESOURCE"
else
echo " ⚠ Could not resolve Lakebase database ID — postgres resource won't be auto-configured"
fi
fi
# ── Configure app scopes and resources ───────────────────────────────────
# The PATCH API is the mechanism that configures both user_api_scopes and
# resources on a Databricks App. app.yaml user_api_scopes are documentation
# only; apps deploy does not apply them.
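# The payload assembled below comes out shaped roughly like this (sketch,
# values elided):
#
#   {
#     "user_api_scopes": ["sql", "dashboards.genie", ...],
#     "resources": [
#       {"name": "sql-warehouse",
#        "sql_warehouse": {"id": "<warehouse-id>", "permission": "CAN_USE"}},
#       {"name": "postgres",
#        "postgres": {"branch": "projects/<name>/branches/production",
#                     "database": "projects/<name>/branches/production/databases/<db-id>",
#                     "permission": "CAN_CONNECT_AND_CREATE"}}
#     ]
#   }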
echo " Configuring app scopes and resources..."
EXISTING_RESOURCES=$(databricks apps get "$APP_NAME" --profile "$PROFILE" -o json 2>/dev/null \
| python3 -c "import sys,json; print(json.dumps(json.load(sys.stdin).get('resources',[])))" 2>/dev/null || echo "[]")
PATCH_PAYLOAD=$(python3 -c "
import json
scopes = ['sql', 'dashboards.genie', 'serving.serving-endpoints',
          'catalog.catalogs:read', 'catalog.schemas:read',
          'catalog.tables:read', 'files.files']
# Start with existing resources. The PATCH API replaces all resources,
# so we must include everything. Preserve all resources that either have
# full config or are referenced by app.yaml (e.g. postgres for Lakebase).
existing = json.loads('$EXISTING_RESOURCES')
app_yaml_resources = {'sql-warehouse', 'postgres'} # referenced by valueFrom in app.yaml
by_name = {}
for r in existing:
    has_config = any(k for k in r if k != 'name')
    if has_config or r.get('name') in app_yaml_resources:
        by_name[r['name']] = r
# Ensure sql-warehouse is set with the correct ID
by_name['sql-warehouse'] = {'name': 'sql-warehouse', 'sql_warehouse': {'id': '$WAREHOUSE_ID', 'permission': 'CAN_USE'}}
# Ensure postgres resource has full config when Lakebase is configured.
# The database field requires the full resource path (e.g.
# projects/<name>/branches/production/databases/<db-id>), not just the
# postgres database name.
lakebase_db = '$LAKEBASE_DB_RESOURCE'
if lakebase_db:
    branch = '/'.join(lakebase_db.split('/')[:4])  # projects/<name>/branches/<branch>
    by_name['postgres'] = {
        'name': 'postgres',
        'postgres': {
            'branch': branch,
            'database': lakebase_db,
            'permission': 'CAN_CONNECT_AND_CREATE',
        },
    }
print(json.dumps({'user_api_scopes': scopes, 'resources': list(by_name.values())}))
")
databricks api patch "/api/2.0/apps/$APP_NAME" \
--profile "$PROFILE" --json "$PATCH_PAYLOAD" 2>/dev/null && \
echo " ✓ App scopes and resources configured (sql-warehouse: $WAREHOUSE_ID)" || \
echo " ⚠ Could not configure app scopes/resources"
databricks apps deploy "$APP_NAME" --profile "$PROFILE" \
--source-code-path "$WS_PATH" --no-wait
echo " ✓ App deployment triggered from $WS_PATH"
# ── Verify deployment ────────────────────────────────────────────────────
STEP=$((STEP + 1))
echo ""
echo "▸ Step $STEP/$TOTAL_STEPS: Verifying deployment..."
VERIFY_OK=true
# Check critical files exist on workspace
echo " Checking critical files on workspace..."
CRITICAL_FILES=(
"$WS_PATH/backend/main.py"
"$WS_PATH/backend/__init__.py"
"$WS_PATH/pyproject.toml"
"$WS_PATH/frontend/dist/index.html"
"$WS_PATH/app.yaml"
)
MISSING_FILES=()
for f in "${CRITICAL_FILES[@]}"; do
if ! databricks workspace get-status "$f" --profile "$PROFILE" &>/dev/null; then
MISSING_FILES+=("$(basename "$f")")
fi
done
if [ ${#MISSING_FILES[@]} -gt 0 ]; then
echo " ✗ Missing critical files on workspace: ${MISSING_FILES[*]}"
echo ""
echo " Remediation: re-run deploy or use --update mode."
VERIFY_OK=false
else
echo " ✓ All critical files present on workspace"
fi
# Wait for deployment to settle and check status
echo " Waiting for app deployment to settle..."
DEPLOY_STATE="IN_PROGRESS"
for i in $(seq 1 18); do
sleep 10
APP_JSON=$(databricks apps get "$APP_NAME" --profile "$PROFILE" -o json 2>/dev/null)
DEPLOY_STATE=$(echo "$APP_JSON" | python3 -c "
import sys,json
d=json.load(sys.stdin)
ad = d.get('pending_deployment',{}) or d.get('active_deployment',{})
print(ad.get('status',{}).get('state','UNKNOWN'))
" 2>/dev/null || echo "UNKNOWN")
if [ "$DEPLOY_STATE" != "IN_PROGRESS" ]; then
break
fi
echo " ... $DEPLOY_STATE (attempt $i/18)"
done
APP_URL=$(echo "$APP_JSON" | python3 -c "import sys,json; print(json.load(sys.stdin).get('url',''))" 2>/dev/null || true)
APP_STATUS=$(echo "$APP_JSON" | python3 -c "import sys,json; print(json.load(sys.stdin).get('app_status',{}).get('state','UNKNOWN'))" 2>/dev/null || true)
if [ "$DEPLOY_STATE" = "SUCCEEDED" ]; then
echo " ✓ App deployment SUCCEEDED"
# Wait for app to finish restarting and reach RUNNING state
if [ "$APP_STATUS" != "RUNNING" ]; then
echo " Waiting for app to reach RUNNING state..."
for i in $(seq 1 12); do
sleep 10
APP_JSON=$(databricks apps get "$APP_NAME" --profile "$PROFILE" -o json 2>/dev/null)
APP_STATUS=$(echo "$APP_JSON" | python3 -c "import sys,json; print(json.load(sys.stdin).get('app_status',{}).get('state','UNKNOWN'))" 2>/dev/null || echo "UNKNOWN")
APP_URL=$(echo "$APP_JSON" | python3 -c "import sys,json; print(json.load(sys.stdin).get('url',''))" 2>/dev/null || true)
if [ "$APP_STATUS" = "RUNNING" ]; then
break
fi
if [ "$APP_STATUS" = "CRASHED" ] || [ "$APP_STATUS" = "UNAVAILABLE" ]; then
break
fi
echo " ... app is $APP_STATUS (attempt $i/12)"
done
fi
if [ "$APP_STATUS" = "RUNNING" ]; then
echo " ✓ App is RUNNING"
elif [ "$APP_STATUS" = "CRASHED" ] || [ "$APP_STATUS" = "UNAVAILABLE" ]; then
echo " ✗ App status: $APP_STATUS"
echo " Check logs: databricks apps logs $APP_NAME --profile $PROFILE"
VERIFY_OK=false
else
echo " ℹ App is still $APP_STATUS — it may need more time to start."
echo " Check status: databricks apps get $APP_NAME --profile $PROFILE"
fi
elif [ "$DEPLOY_STATE" = "FAILED" ]; then
DEPLOY_MSG=$(echo "$APP_JSON" | python3 -c "
import sys,json; d=json.load(sys.stdin)
ad = d.get('pending_deployment',{}) or d.get('active_deployment',{})
print(ad.get('status',{}).get('message','unknown error'))
" 2>/dev/null || echo "unknown")
echo " ✗ App deployment FAILED: $DEPLOY_MSG"
echo ""
echo " Remediation:"
echo " 1. Check logs: databricks apps logs $APP_NAME --profile $PROFILE"
echo " 2. Common causes:"
echo " - Missing Python dependencies (check pyproject.toml and uv.lock)"
echo " - Import errors (check backend/main.py and its imports)"
echo " - Missing frontend/dist/ (gitignored, must be built + uploaded)"
echo " 3. Fix the issue and re-run: ./scripts/deploy.sh --update"
VERIFY_OK=false
elif [ "$DEPLOY_STATE" = "IN_PROGRESS" ]; then
echo " ℹ App deployment still IN_PROGRESS after 3 minutes"
echo " Check status: databricks apps get $APP_NAME --profile $PROFILE"
else
echo " ℹ App deployment state: $DEPLOY_STATE"
fi
echo ""
echo "═══════════════════════════════════════════════════════════════"
echo " Deploy complete!"
echo " App: $APP_NAME"
echo " Job: ${JOB_ID:-<not found>}"
echo " SP: $SP_CLIENT_ID"
echo " Deployer: $DEPLOYER"
echo ""
if [ -n "$APP_URL" ]; then
echo " URL: $APP_URL"
else
echo " URL: https://${APP_NAME}-*.databricksapps.com (available shortly)"
fi
echo ""
if [ "$VERIFY_OK" != "true" ]; then
echo " Status: DEPLOY FAILED — review errors above"
echo ""
echo " Quick debug:"
echo " databricks apps logs $APP_NAME --profile $PROFILE"
elif [ "$APP_STATUS" = "RUNNING" ]; then
echo " Status: App is RUNNING ✓"
else
echo " Status: Deploy succeeded, app is $APP_STATUS"
echo " The app may need a minute to finish starting."
fi
echo ""
echo " NOTE: If you see 'Failed to list spaces' in the app, attach a"
echo " Lakebase PostgreSQL resource named 'postgres' in the Apps UI"
echo " with CAN_CONNECT_AND_CREATE permission. The app will auto-retry"
echo " schema creation — no redeploy needed."
echo "═══════════════════════════════════════════════════════════════"