#!/usr/bin/env bash
#
# Create a KIND cluster named ${CLUSTER} (default: kms-health-claude)
# with a KMSv2 fake plugin (static pod) and a health-monitor Deployment
# pinned to the control-plane node. Self-contained — doesn't depend on
# kubernetes/kubernetes/hack/local-up-kms.
#
# Run from the library-go module root.
#
# Required tools: kind, kubectl, docker (checked below).
# Tunable env vars: CLUSTER, KIND_NODE_IMAGE.

set -o errexit
set -o nounset
set -o pipefail

# Force docker — this harness's Dockerfiles build with `docker build`
# and `kind load docker-image` needs the same backend. User may have
# KIND_EXPERIMENTAL_PROVIDER=podman set globally; unset locally.
unset KIND_EXPERIMENTAL_PROVIDER

# Fail fast with a clear message instead of an opaque mid-run
# "command not found" under `set -e`.
for tool in kind kubectl docker; do
  if ! command -v "${tool}" >/dev/null 2>&1; then
    echo "[up.sh] required tool not found on PATH: ${tool}" >&2
    exit 1
  fi
done

CLUSTER="${CLUSTER:-kms-health-claude}"
KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-kindest/node:v1.33.0}"

# Absolute path of the harness directory (parent of this script's dir),
# resolved physically so symlinked checkouts still work.
HARNESS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
readonly CLUSTER KIND_NODE_IMAGE HARNESS_DIR

echo "[up.sh] cluster=${CLUSTER} node=${KIND_NODE_IMAGE}"
echo "[up.sh] harness-dir=${HARNESS_DIR}"

# kind.yaml uses paths relative to the harness dir (manifests/...).
cd "${HARNESS_DIR}"

# 1) Create the cluster (idempotent). -F: match the name as a fixed
#    string, not a regex (names with '.' etc. must not false-match);
#    -x: whole-line match only.
if kind get clusters 2>/dev/null | grep -Fqx "${CLUSTER}"; then
  echo "[up.sh] cluster ${CLUSTER} already exists, reusing"
else
  echo "[up.sh] creating kind cluster ${CLUSTER}"
  kind create cluster \
    --name "${CLUSTER}" \
    --image "${KIND_NODE_IMAGE}" \
    --config manifests/kind.yaml
fi

CTX="kind-${CLUSTER}"
NODE="${CLUSTER}-control-plane"
STATIC_POD="kms-fake-plugin-${NODE}"
readonly CTX NODE STATIC_POD

# 2) Load our local images.
echo "[up.sh] loading images into cluster"
kind load docker-image kms-health-kind-fake-plugin:dev --name "${CLUSTER}"
kind load docker-image kms-health-kind-monitor:dev --name "${CLUSTER}"

# 3) Force the static pod to restart so it picks up the freshly-loaded
#    image (kubelet may have cached an ErrImageNeverPull from before).
#    Best-effort: the container may not exist yet on a fresh cluster,
#    hence the deliberate `|| true` guards.
echo "[up.sh] bouncing fake-plugin container so it picks up loaded image"
FAKE_CID="$(docker exec "${NODE}" crictl ps --name fake-plugin -q 2>/dev/null | head -n 1 || true)"
if [[ -n "${FAKE_CID}" ]]; then
  docker exec "${NODE}" crictl stop "${FAKE_CID}" >/dev/null || true
fi

# 4) Apply namespace + RBAC + monitor Deployment. The status ConfigMap
#    is created by the writer itself on first observation; nothing
#    pre-creates it.
echo "[up.sh] applying namespace, RBAC, monitor Deployment"
kubectl --context "${CTX}" apply -f manifests/namespace.yaml
kubectl --context "${CTX}" apply -f manifests/rbac.yaml
kubectl --context "${CTX}" apply -f manifests/monitor-deployment.yaml

# 5) Wait for the monitor Deployment to be Available. Apiserver is NOT
#    wired to KMS here (see manifests/kind.yaml) — we're only
#    validating the monitor, not apiserver/KMS integration.
echo "[up.sh] waiting for monitor Deployment"
kubectl --context "${CTX}" -n kms-health-test rollout status deployment/kms-health-monitor-fake --timeout=120s

# rollout status succeeded above, so at least one pod exists.
MONITOR_POD="$(kubectl --context "${CTX}" -n kms-health-test get pod \
  -l app=kms-health-monitor-fake \
  -o jsonpath='{.items[0].metadata.name}')"
echo "[up.sh] done."
echo "  cluster:     ${CLUSTER}"
echo "  monitor pod: ${MONITOR_POD} (kms-health-test)"
echo "  status cm:   kms-health-fake (kms-health-test)"
echo "  fake plugin: ${STATIC_POD} (kube-system, static)"
echo ""
echo "Try:"
echo "  kubectl --context ${CTX} -n kms-health-test get cm kms-health-fake -o yaml"