From 040733473337c1f9c06e37699e34be9ee8afe7b3 Mon Sep 17 00:00:00 2001 From: sdairs Date: Thu, 16 Apr 2026 16:51:05 +0100 Subject: [PATCH 01/12] Add --debug flag to report winning cloud credential source Closes #48. Credentials can come from CLI flags, `.clickhouse/credentials.json`, env vars, or OAuth tokens, and it was hard to tell which one actually won precedence when debugging. `--debug` now prints the resolved source and API URL to stderr, so it works equally well with and without `--json`. `cloud auth status` also gains an `Active` column that marks the winning source, reusing the same resolution logic. --- README.md | 11 ++ crates/clickhousectl/src/cloud/cli.rs | 4 + crates/clickhousectl/src/cloud/client.rs | 144 +++++++++++++++++++++++ crates/clickhousectl/src/cloud/mod.rs | 2 +- crates/clickhousectl/src/main.rs | 32 +++++ 5 files changed, 192 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b33ec21..2e5079a 100644 --- a/README.md +++ b/README.md @@ -207,6 +207,17 @@ clickhousectl cloud auth logout # Clear all saved credentials (credentials.js Credential resolution order: CLI flags > `.clickhouse/credentials.json` > environment variables > OAuth tokens. +### Debugging which credential source was used + +Pass `--debug` to any `cloud` command to print the resolved credential source (and the API URL) to stderr before the command runs. This works with and without `--json`. + +```bash +clickhousectl cloud --debug service list +# [debug] auth source: credentials file (.clickhouse/credentials.json) +# [debug] api url: https://api.clickhouse.cloud/v1 +# ... normal output ... +``` + ## Cloud Manage ClickHouse Cloud services via the API. 
diff --git a/crates/clickhousectl/src/cloud/cli.rs b/crates/clickhousectl/src/cloud/cli.rs index 4e0c0d7..290e240 100644 --- a/crates/clickhousectl/src/cloud/cli.rs +++ b/crates/clickhousectl/src/cloud/cli.rs @@ -87,6 +87,10 @@ pub struct CloudArgs { #[arg(long, global = true)] pub json: bool, + /// Print debug info (e.g. the credential source used) to stderr before running the command + #[arg(long, global = true)] + pub debug: bool, + /// API base URL (default: auto-detect from OAuth tokens, or https://api.clickhouse.cloud) #[cfg_attr(debug_assertions, arg(long, global = true))] #[cfg_attr(not(debug_assertions), arg(long, global = true, hide = true))] diff --git a/crates/clickhousectl/src/cloud/client.rs b/crates/clickhousectl/src/cloud/client.rs index fbc8953..fe4466d 100644 --- a/crates/clickhousectl/src/cloud/client.rs +++ b/crates/clickhousectl/src/cloud/client.rs @@ -23,9 +23,88 @@ enum AuthMode { Bearer, } +/// The resolved credential source that won precedence for a `CloudClient`. +/// +/// Useful for debugging "which credential did we actually use?" questions. +/// See `CloudClient::auth_source`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AuthSource { + /// `--api-key` / `--api-secret` CLI flags + CliFlags, + /// Project-local `.clickhouse/credentials.json` + CredentialsFile, + /// `CLICKHOUSE_CLOUD_API_KEY` / `CLICKHOUSE_CLOUD_API_SECRET` env vars + EnvVars, + /// OAuth tokens saved by `cloud auth login` (`.clickhouse/tokens.json`) + OAuthTokens, +} + +/// Resolve the credential source that *would* win precedence if a `CloudClient` +/// were constructed right now, without actually creating one. +/// +/// Returns `None` if no usable credentials are configured. Mirrors the +/// precedence used by `CloudClient::new`: CLI flags > credentials file +/// > env vars > OAuth tokens. 
+pub fn resolve_active_auth_source( + api_key: Option<&str>, + api_secret: Option<&str>, +) -> Option<AuthSource> { + if api_key.is_some() || api_secret.is_some() { + return Some(AuthSource::CliFlags); + } + if crate::cloud::credentials::load_credentials().is_some() { + return Some(AuthSource::CredentialsFile); + } + let env_key = env::var("CLICKHOUSE_CLOUD_API_KEY").ok(); + let env_secret = env::var("CLICKHOUSE_CLOUD_API_SECRET").ok(); + if env_key.is_some() && env_secret.is_some() { + return Some(AuthSource::EnvVars); + } + if let Some(tokens) = crate::cloud::auth::load_tokens() + && crate::cloud::auth::is_token_valid(&tokens) + { + return Some(AuthSource::OAuthTokens); + } + None +} + +impl AuthSource { + /// Short label for the source (useful for tables / compact output). + #[allow(dead_code)] + pub fn label(&self) -> &'static str { + match self { + AuthSource::CliFlags => "CLI flags", + AuthSource::CredentialsFile => "Credentials file", + AuthSource::EnvVars => "Env vars", + AuthSource::OAuthTokens => "OAuth", + } + } + + /// One-line description including the concrete source (flag, path, env var names). + pub fn describe(&self) -> String { + match self { + AuthSource::CliFlags => "CLI flags (--api-key, --api-secret)".to_string(), + AuthSource::CredentialsFile => format!( + "credentials file ({})", + crate::cloud::credentials::credentials_path().display() + ), + AuthSource::EnvVars => { + "environment variables (CLICKHOUSE_CLOUD_API_KEY, CLICKHOUSE_CLOUD_API_SECRET)" + .to_string() + } + AuthSource::OAuthTokens => format!( + "OAuth tokens ({})", + crate::cloud::auth::tokens_path().display() + ), + } + } +} + +pub struct CloudClient { lib_client: clickhouse_cloud_api::Client, auth_mode: AuthMode, + auth_source: AuthSource, + base_url: String, } /// Convert CLI base URL (with /v1 suffix) to library base URL (without /v1). 
@@ -71,6 +150,8 @@ impl CloudClient { return Ok(Self { lib_client, auth_mode: AuthMode::Basic, + auth_source: AuthSource::CliFlags, + base_url, }); } @@ -89,6 +170,8 @@ impl CloudClient { return Ok(Self { lib_client, auth_mode: AuthMode::Basic, + auth_source: AuthSource::CredentialsFile, + base_url, }); } @@ -105,6 +188,8 @@ impl CloudClient { return Ok(Self { lib_client, auth_mode: AuthMode::Basic, + auth_source: AuthSource::EnvVars, + base_url, }); } @@ -123,6 +208,8 @@ impl CloudClient { return Ok(Self { lib_client, auth_mode: AuthMode::Bearer, + auth_source: AuthSource::OAuthTokens, + base_url, }); } @@ -137,6 +224,16 @@ impl CloudClient { matches!(self.auth_mode, AuthMode::Bearer) } + /// The credential source that won precedence when constructing this client. + pub fn auth_source(&self) -> AuthSource { + self.auth_source + } + + /// The API base URL the client is talking to (includes the `/v1` suffix). + pub fn base_url(&self) -> &str { + &self.base_url + } + /// Access the library client for migrated commands. 
pub fn api(&self) -> &clickhouse_cloud_api::Client { &self.lib_client @@ -742,6 +839,8 @@ mod tests { CloudClient { lib_client, auth_mode: AuthMode::Basic, + auth_source: AuthSource::CliFlags, + base_url: DEFAULT_BASE_URL.to_string(), } } @@ -756,6 +855,8 @@ mod tests { let client = CloudClient { lib_client, auth_mode: AuthMode::Bearer, + auth_source: AuthSource::OAuthTokens, + base_url: DEFAULT_BASE_URL.to_string(), }; assert!(client.is_bearer_auth()); } @@ -833,6 +934,8 @@ mod tests { let client = CloudClient { lib_client, auth_mode: AuthMode::Bearer, + auth_source: AuthSource::OAuthTokens, + base_url: DEFAULT_BASE_URL.to_string(), }; let err = client.convert_error(clickhouse_cloud_api::Error::Api { status: 403, @@ -841,6 +944,47 @@ mod tests { assert!(err.message.contains("Hint: You are authenticated via OAuth")); } + #[test] + fn auth_source_label_and_describe() { + assert_eq!(AuthSource::CliFlags.label(), "CLI flags"); + assert_eq!(AuthSource::CredentialsFile.label(), "Credentials file"); + assert_eq!(AuthSource::EnvVars.label(), "Env vars"); + assert_eq!(AuthSource::OAuthTokens.label(), "OAuth"); + + assert!(AuthSource::CliFlags.describe().contains("--api-key")); + assert!( + AuthSource::EnvVars + .describe() + .contains("CLICKHOUSE_CLOUD_API_KEY") + ); + assert!(AuthSource::CredentialsFile.describe().contains("credentials")); + assert!(AuthSource::OAuthTokens.describe().contains("OAuth")); + } + + #[test] + fn auth_source_accessor_returns_cli_flags_default_in_test_client() { + let client = test_client(); + assert_eq!(client.auth_source(), AuthSource::CliFlags); + assert_eq!(client.base_url(), DEFAULT_BASE_URL); + } + + #[test] + fn resolve_active_auth_source_cli_flags_take_precedence() { + // CLI flags must always win, regardless of other configured sources. 
+ assert_eq!( + resolve_active_auth_source(Some("k"), Some("s")), + Some(AuthSource::CliFlags) + ); + assert_eq!( + resolve_active_auth_source(Some("k"), None), + Some(AuthSource::CliFlags) + ); + assert_eq!( + resolve_active_auth_source(None, Some("s")), + Some(AuthSource::CliFlags) + ); + } + #[test] fn convert_error_no_hint_for_403_basic() { let client = test_client(); diff --git a/crates/clickhousectl/src/cloud/mod.rs b/crates/clickhousectl/src/cloud/mod.rs index 26ef4ce..0d83ebd 100644 --- a/crates/clickhousectl/src/cloud/mod.rs +++ b/crates/clickhousectl/src/cloud/mod.rs @@ -8,4 +8,4 @@ pub mod types; #[cfg(test)] mod types_test; -pub use client::CloudClient; +pub use client::{AuthSource, CloudClient, resolve_active_auth_source}; diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs index 6d2bc90..19185ea 100644 --- a/crates/clickhousectl/src/main.rs +++ b/crates/clickhousectl/src/main.rs @@ -185,8 +185,17 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { status: String, #[tabled(rename = "Scope")] scope: String, + #[tabled(rename = "Active")] + active: String, } + // Determine which source would actually win precedence right now. + // CLI --api-key/--api-secret aren't relevant to `auth status` itself. 
+ let active = cloud::resolve_active_auth_source(None, None); + let mark = |src: cloud::AuthSource| -> String { + if active == Some(src) { "yes".into() } else { "-".into() } + }; + let mut rows = Vec::new(); match cloud::auth::load_tokens() { @@ -195,6 +204,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "OAuth".into(), status: "Active".into(), scope: "read-only".into(), + active: mark(cloud::AuthSource::OAuthTokens), }); } Some(_) => { @@ -202,6 +212,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "OAuth".into(), status: "Expired".into(), scope: "read-only".into(), + active: "-".into(), }); } None => { @@ -209,6 +220,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "OAuth".into(), status: "Not configured".into(), scope: "-".into(), + active: "-".into(), }); } } @@ -218,12 +230,14 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "API key".into(), status: "Active".into(), scope: "read/write".into(), + active: mark(cloud::AuthSource::CredentialsFile), }); } else { rows.push(AuthRow { auth_type: "API key".into(), status: "Not configured".into(), scope: "-".into(), + active: "-".into(), }); } @@ -235,6 +249,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "Env vars".into(), status: "Active".into(), scope: "read/write".into(), + active: mark(cloud::AuthSource::EnvVars), }); } (true, false) => { @@ -242,6 +257,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "Env vars".into(), status: "Incomplete (missing CLICKHOUSE_CLOUD_API_SECRET)".into(), scope: "-".into(), + active: "-".into(), }); } (false, true) => { @@ -249,6 +265,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "Env vars".into(), status: "Incomplete (missing CLICKHOUSE_CLOUD_API_KEY)".into(), scope: "-".into(), + active: "-".into(), }); } (false, false) => { @@ -256,10 +273,20 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { auth_type: "Env vars".into(), status: "Not 
configured".into(), scope: "-".into(), + active: "-".into(), }); } } + if args.debug { + match active { + Some(src) => { + eprintln!("[debug] auth source: {}", src.describe()); + } + None => eprintln!("[debug] auth source: none (no credentials configured)"), + } + } + if args.json { println!("{}", serde_json::to_string_pretty(&rows)?); } else { @@ -282,6 +309,11 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { ) .map_err(|e| Error::Cloud(e.to_string()))?; + if args.debug { + eprintln!("[debug] auth source: {}", client.auth_source().describe()); + eprintln!("[debug] api url: {}", client.base_url()); + } + // OAuth (Bearer) tokens are read-only. Block write commands early // to avoid fail loops where agents repeatedly hit 403 errors. if client.is_bearer_auth() && args.command.is_write_command() { From 3f44eb8c3ef7315806c29598ee6fda2fe63de0f5 Mon Sep 17 00:00:00 2001 From: sdairs Date: Thu, 16 Apr 2026 23:06:29 +0100 Subject: [PATCH 02/12] Add cloud postgres subcommand tree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plumbs all 13 ClickHouse Cloud managed Postgres operations into the CLI under `clickhousectl cloud postgres ...` — CRUD, lifecycle (restart/ promote/switchover), CA certs, runtime config (get/replace/patch with --set key=value overrides), password reset, read replica creation, and PITR restore. Lives in its own src/cloud/postgres.rs module with 33 new unit + parse tests and the full write-classification coverage. 
Closes #116 Co-Authored-By: Claude Opus 4.7 (1M context) --- Cargo.lock | 206 ++- README.md | 76 ++ crates/clickhousectl/Cargo.toml | 2 +- crates/clickhousectl/src/cli.rs | 4 + crates/clickhousectl/src/cloud/cli.rs | 34 +- crates/clickhousectl/src/cloud/commands.rs | 8 +- crates/clickhousectl/src/cloud/mod.rs | 1 + crates/clickhousectl/src/cloud/postgres.rs | 1418 ++++++++++++++++++++ crates/clickhousectl/src/main.rs | 215 +++ 9 files changed, 1956 insertions(+), 8 deletions(-) create mode 100644 crates/clickhousectl/src/cloud/postgres.rs diff --git a/Cargo.lock b/Cargo.lock index a46bb94..b0cd0aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,6 +76,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -456,6 +462,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -581,11 +593,24 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + [[package]] name = "h2" version = "0.4.13" @@ -605,6 +630,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + [[package]] name = "hashbrown" version = "0.17.0" @@ -835,6 +869,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "idna" version = "1.1.0" @@ -863,7 +903,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.17.0", + "serde", + "serde_core", ] [[package]] @@ -974,6 +1016,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" version = "0.2.183" @@ -1194,6 +1242,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -1296,6 +1354,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "rand" version = 
"0.9.2" @@ -1612,6 +1676,12 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" + [[package]] name = "serde" version = "1.0.228" @@ -2015,6 +2085,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "unit-prefix" version = "0.5.2" @@ -2063,6 +2139,7 @@ version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9" dependencies = [ + "getrandom 0.4.2", "js-sys", "serde_core", "wasm-bindgen", @@ -2102,6 +2179,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.114" @@ -2161,6 +2247,28 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + [[package]] name = 
"wasm-streams" version = "0.5.0" @@ -2174,6 +2282,18 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" version = "0.3.91" @@ -2530,6 +2650,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + 
"wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" diff --git a/README.md b/README.md index 2e5079a..4ebfc35 100644 --- a/README.md +++ b/README.md @@ -372,6 +372,82 @@ clickhousectl cloud service delete --force | `--private-preview-terms-checked` | Accept private preview terms when required | | `--enable-core-dumps` | Enable or disable service core dump collection | +### Postgres (beta) + +Manage ClickHouse Cloud managed Postgres services. All write commands require API key auth. + +```bash +# List / get +clickhousectl cloud postgres list +clickhousectl cloud postgres list --filter state=running +clickhousectl cloud postgres get + +# Create +clickhousectl cloud postgres create \ + --name my-pg \ + --region us-east-1 \ + --size m7i.2xlarge \ + --storage-gb 100 + +# Create with version + HA + tags + advanced config +clickhousectl cloud postgres create \ + --name my-pg \ + --region us-east-1 \ + --size m7i.2xlarge \ + --storage-gb 100 \ + --pg-version 17 \ + --ha-type sync \ + --tag env=prod \ + --pg-config-file ./pg.json + +# Update metadata (all flags optional) +clickhousectl cloud postgres update \ + --name renamed \ + --size m7i.4xlarge \ + --storage-gb 200 \ + --add-tag env=prod --remove-tag legacy + +# Delete +clickhousectl cloud postgres delete + +# CA certificates +clickhousectl cloud postgres certs get # raw PEM to stdout +clickhousectl cloud postgres certs get --output ca.pem # file (mode 0600 on unix) + +# Runtime configuration +clickhousectl cloud postgres config get +clickhousectl cloud postgres config replace --file cfg.json +clickhousectl cloud postgres config patch --set 
max_connections=500 --set random_page_cost=1.1 +clickhousectl cloud postgres config patch --file patch.json + +# Password +clickhousectl cloud postgres reset-password --password 'MyStr0ngPassword!' +clickhousectl cloud postgres reset-password --generate + +# Read replica and PITR restore +clickhousectl cloud postgres read-replica create --name replica-1 +clickhousectl cloud postgres restore --name restored --restore-target 2026-04-16T12:00:00Z + +# Lifecycle +clickhousectl cloud postgres restart +clickhousectl cloud postgres promote +clickhousectl cloud postgres switchover +``` + +**Postgres Create Options:** +| Option | Description | +|--------|-------------| +| `--name` | Service name (required) | +| `--region` | Cloud region, e.g. `us-east-1` (required) | +| `--size` | Instance size, e.g. `m7i.2xlarge` (required; server-validated) | +| `--storage-gb` | Storage size in GB (required) | +| `--provider` | Cloud provider (default: `aws`) | +| `--pg-version` | Postgres major version: `18`, `17`, `16` | +| `--ha-type` | High-availability: `none`, `async`, `sync` | +| `--tag` | Resource tag `key` or `key=value` (repeatable) | +| `--pg-config-file` | Path to JSON file with a `PgConfig` object | +| `--pg-bouncer-config-file` | Path to JSON file with a `PgBouncerConfig` object | + ### Backups ```bash diff --git a/crates/clickhousectl/Cargo.toml b/crates/clickhousectl/Cargo.toml index 27cc1b2..e138cb7 100644 --- a/crates/clickhousectl/Cargo.toml +++ b/crates/clickhousectl/Cargo.toml @@ -23,7 +23,7 @@ open = "5.3.3" url = "2.5.8" tabled = "0.20.0" clickhouse-cloud-api = { version = "0.1.0", path = "../clickhouse-cloud-api" } -uuid = "1.23.0" +uuid = { version = "1.23.0", features = ["v4"] } [dev-dependencies] tempfile = "3.27.0" diff --git a/crates/clickhousectl/src/cli.rs b/crates/clickhousectl/src/cli.rs index 434796c..1e704d5 100644 --- a/crates/clickhousectl/src/cli.rs +++ b/crates/clickhousectl/src/cli.rs @@ -5,6 +5,10 @@ pub use crate::cloud::cli::{ InvitationCommands, 
KeyCommands, MemberCommands, OrgCommands, PrivateEndpointCommands, QueryEndpointCommands, ServiceCommands, }; +pub use crate::cloud::postgres::{ + CertsCommands as PostgresCertsCommands, ConfigCommands as PostgresConfigCommands, + PostgresCommands, ReadReplicaCommands as PostgresReadReplicaCommands, +}; pub use crate::local::cli::LocalArgs; #[derive(Parser)] diff --git a/crates/clickhousectl/src/cloud/cli.rs b/crates/clickhousectl/src/cloud/cli.rs index 290e240..68b1fb5 100644 --- a/crates/clickhousectl/src/cloud/cli.rs +++ b/crates/clickhousectl/src/cloud/cli.rs @@ -9,7 +9,7 @@ fn parse_date_only(value: &str) -> Result { Ok(value.to_string()) } -fn parse_datetime(value: &str) -> Result { +pub(super) fn parse_datetime(value: &str) -> Result { if DateTime::::parse_from_rfc3339(value).is_err() { return Err(format!( "invalid datetime '{}': expected ISO 8601 / RFC 3339", @@ -175,6 +175,18 @@ CONTEXT FOR AGENTS: #[command(subcommand)] command: ActivityCommands, }, + + /// Manage ClickHouse Cloud Postgres services (beta) + #[command(after_help = "\ +CONTEXT FOR AGENTS: + Manage ClickHouse Cloud managed Postgres services. Subcommands cover CRUD, lifecycle + (restart/promote/switchover), CA certs, runtime config, password reset, read replicas, + and point-in-time restore. Service IDs come from `postgres list`. + Write commands require API key auth — OAuth is read-only.")] + Postgres { + #[command(subcommand)] + command: crate::cloud::postgres::PostgresCommands, + }, } impl CloudCommands { @@ -247,6 +259,7 @@ impl CloudCommands { ActivityCommands::List { .. } => false, ActivityCommands::Get { .. 
} => false, }, + CloudCommands::Postgres { command } => command.is_write(), } } } @@ -1561,6 +1574,12 @@ mod tests { // Private endpoint read assert_write(&["clickhousectl", "cloud", "service", "private-endpoint", "get-config", "svc-1"], false); + + // Postgres reads + assert_write(&["clickhousectl", "cloud", "postgres", "list"], false); + assert_write(&["clickhousectl", "cloud", "postgres", "get", "pg-1"], false); + assert_write(&["clickhousectl", "cloud", "postgres", "certs", "get", "pg-1"], false); + assert_write(&["clickhousectl", "cloud", "postgres", "config", "get", "pg-1"], false); } #[test] @@ -1599,5 +1618,18 @@ mod tests { // Private endpoint write assert_write(&["clickhousectl", "cloud", "service", "private-endpoint", "create", "svc-1", "--endpoint-id", "ep-1"], true); + + // Postgres writes + assert_write(&["clickhousectl", "cloud", "postgres", "create", "--name", "pg", "--region", "us-east-1", "--size", "m7i.2xlarge", "--storage-gb", "100"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "update", "pg-1", "--name", "renamed"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "delete", "pg-1"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "config", "replace", "pg-1", "--file", "/tmp/c.json"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "config", "patch", "pg-1", "--set", "max_connections=500"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "reset-password", "pg-1", "--generate"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "read-replica", "create", "pg-1", "--name", "r1"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "restore", "pg-1", "--name", "r", "--restore-target", "2026-04-16T12:00:00Z"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "restart", "pg-1"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "promote", "pg-1"], true); + assert_write(&["clickhousectl", "cloud", "postgres", "switchover", "pg-1"], 
true); } } diff --git a/crates/clickhousectl/src/cloud/commands.rs b/crates/clickhousectl/src/cloud/commands.rs index b083589..72ceefa 100644 --- a/crates/clickhousectl/src/cloud/commands.rs +++ b/crates/clickhousectl/src/cloud/commands.rs @@ -63,7 +63,7 @@ const KNOWN_PROFILES: &[&str] = &[ ]; /// Resolve org ID from explicit arg or auto-detect -async fn resolve_org_id( +pub(super) async fn resolve_org_id( client: &CloudClient, org_id: Option<&str>, ) -> Result> { @@ -107,7 +107,7 @@ async fn resolve_service( /// Parse a string into a library enum via serde deserialization, with client-side /// validation against a known-values list. Library enums have an `Unknown(String)` /// catch-all that prevents serde from ever failing, so we validate first. -fn parse_serde_enum( +pub(super) fn parse_serde_enum( value: &str, field: &str, known_values: &[&str], @@ -125,7 +125,7 @@ fn parse_serde_enum( .map_err(|e| format!("invalid {}: {}", field, e).into()) } -fn parse_tag(value: &str) -> Result> { +pub(super) fn parse_tag(value: &str) -> Result> { match value.split_once('=') { Some((key, tag_value)) => { let key = key.trim(); @@ -152,7 +152,7 @@ fn parse_tag(value: &str) -> Result> } } -fn parse_tags( +pub(super) fn parse_tags( values: &[String], ) -> Result>, Box> { if values.is_empty() { diff --git a/crates/clickhousectl/src/cloud/mod.rs b/crates/clickhousectl/src/cloud/mod.rs index 0d83ebd..e9e4335 100644 --- a/crates/clickhousectl/src/cloud/mod.rs +++ b/crates/clickhousectl/src/cloud/mod.rs @@ -3,6 +3,7 @@ pub mod cli; pub mod client; pub mod commands; pub mod credentials; +pub mod postgres; pub mod types; #[cfg(test)] diff --git a/crates/clickhousectl/src/cloud/postgres.rs b/crates/clickhousectl/src/cloud/postgres.rs new file mode 100644 index 0000000..d3c3428 --- /dev/null +++ b/crates/clickhousectl/src/cloud/postgres.rs @@ -0,0 +1,1418 @@ +use crate::cloud::client::CloudClient; +use crate::cloud::cli::parse_datetime; +use crate::cloud::commands::{parse_serde_enum, 
parse_tags, resolve_org_id}; +use clap::Subcommand; +use clickhouse_cloud_api::models::{ + ApiResponse, PgConfig, PgHaType, PgProvider, PgVersion, PostgresInstanceConfig, + PostgresService, PostgresServiceListItem, PostgresServicePatchRequest, + PostgresServicePostRequest, PostgresServiceReadReplicaRequest, PostgresServiceRestoreRequest, + PostgresServiceSetPassword, PostgresServiceSetState, PostgresServiceSetStateCommand, + ResourceTagsV1, +}; +use serde::de::DeserializeOwned; +use std::path::{Path, PathBuf}; +use tabled::{Table, Tabled, settings::Style}; + +const KNOWN_PG_PROVIDERS: &[&str] = &["aws"]; +const KNOWN_PG_VERSIONS: &[&str] = &["18", "17", "16"]; +const KNOWN_PG_HA_TYPES: &[&str] = &["none", "async", "sync"]; + +#[derive(Subcommand)] +pub enum PostgresCommands { + /// List Postgres services in the organization + List { + #[arg(long)] + org_id: Option, + /// Filter results by field (e.g. --filter state=running) + #[arg(long)] + filter: Vec, + }, + + /// Get details for a single Postgres service + Get { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, + + /// Create a new Postgres service + Create { + /// Service name + #[arg(long)] + name: String, + /// Cloud region (e.g. us-east-1) + #[arg(long)] + region: String, + /// Instance size (e.g. m7i.2xlarge). Server validates — accepts any value. + #[arg(long)] + size: String, + /// Storage size in GB + #[arg(long)] + storage_gb: i64, + /// Cloud provider + #[arg(long, default_value = "aws")] + provider: String, + /// Postgres major version + #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(KNOWN_PG_VERSIONS))] + pg_version: Option, + /// High-availability type + #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(KNOWN_PG_HA_TYPES))] + ha_type: Option, + /// Resource tag (repeatable), e.g. 
--tag env=prod + #[arg(long)] + tag: Vec, + /// Path to a JSON file with a PgConfig object + #[arg(long)] + pg_config_file: Option, + /// Path to a JSON file with a PgBouncerConfig object + #[arg(long)] + pg_bouncer_config_file: Option, + #[arg(long)] + org_id: Option, + }, + + /// Update an existing Postgres service (metadata only) + Update { + postgres_id: String, + #[arg(long)] + name: Option, + #[arg(long)] + region: Option, + #[arg(long)] + size: Option, + #[arg(long)] + storage_gb: Option, + #[arg(long)] + provider: Option, + #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(KNOWN_PG_VERSIONS))] + pg_version: Option, + #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(KNOWN_PG_HA_TYPES))] + ha_type: Option, + /// Add a tag (repeatable), e.g. --add-tag env=prod + #[arg(long)] + add_tag: Vec, + /// Remove a tag by key (repeatable) + #[arg(long)] + remove_tag: Vec, + #[arg(long)] + org_id: Option, + }, + + /// Delete a Postgres service + Delete { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, + + /// Manage CA certificates + #[command(subcommand)] + Certs(CertsCommands), + + /// Manage Postgres runtime configuration + #[command(subcommand)] + Config(ConfigCommands), + + /// Reset the Postgres service password + ResetPassword { + postgres_id: String, + /// New password (min 12, must include upper, lower, digit) + #[arg(long, conflicts_with = "generate")] + password: Option, + /// Generate a random compliant password and print it + #[arg(long, conflicts_with = "password")] + generate: bool, + #[arg(long)] + org_id: Option, + }, + + /// Manage read replicas + #[command(name = "read-replica", subcommand)] + ReadReplica(ReadReplicaCommands), + + /// Restore a Postgres service to a point in time + Restore { + /// Source Postgres service ID + postgres_id: String, + /// Name for the restored service + #[arg(long)] + name: String, + /// Point-in-time target (ISO 8601 / RFC 3339, e.g. 
2026-04-16T12:00:00Z) + #[arg(long, value_parser = parse_datetime)] + restore_target: String, + #[arg(long)] + tag: Vec, + #[arg(long)] + pg_config_file: Option, + #[arg(long)] + pg_bouncer_config_file: Option, + #[arg(long)] + org_id: Option, + }, + + /// Restart a Postgres service + Restart { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, + + /// Promote a read replica to primary + Promote { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, + + /// Switch over between primary and replica + Switchover { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, +} + +#[derive(Subcommand)] +pub enum CertsCommands { + /// Get the CA certificate bundle (PEM) for a Postgres service + Get { + postgres_id: String, + /// Write PEM to the given file (mode 0600 on unix) instead of stdout + #[arg(long)] + output: Option, + #[arg(long)] + org_id: Option, + }, +} + +#[derive(Subcommand)] +pub enum ConfigCommands { + /// Get current runtime configuration (pgConfig + pgBouncerConfig) + Get { + postgres_id: String, + #[arg(long)] + org_id: Option, + }, + /// Replace the entire runtime configuration + Replace { + postgres_id: String, + /// JSON file with a full PostgresInstanceConfig object + #[arg(long)] + file: PathBuf, + #[arg(long)] + org_id: Option, + }, + /// Patch selected runtime configuration fields + Patch { + postgres_id: String, + /// Set a pgConfig field (repeatable), e.g. 
--set max_connections=500 + #[arg(long = "set", conflicts_with = "file")] + sets: Vec, + /// JSON file with a partial PostgresInstanceConfig object + #[arg(long, conflicts_with = "sets")] + file: Option, + #[arg(long)] + org_id: Option, + }, +} + +#[derive(Subcommand)] +pub enum ReadReplicaCommands { + /// Create a read replica of an existing Postgres service + Create { + /// Source Postgres service ID + postgres_id: String, + /// Name for the new replica + #[arg(long)] + name: String, + #[arg(long)] + tag: Vec, + #[arg(long)] + pg_config_file: Option, + #[arg(long)] + pg_bouncer_config_file: Option, + #[arg(long)] + org_id: Option, + }, +} + +impl PostgresCommands { + pub fn is_write(&self) -> bool { + match self { + PostgresCommands::List { .. } | PostgresCommands::Get { .. } => false, + PostgresCommands::Certs(CertsCommands::Get { .. }) => false, + PostgresCommands::Config(ConfigCommands::Get { .. }) => false, + + PostgresCommands::Create { .. } + | PostgresCommands::Update { .. } + | PostgresCommands::Delete { .. } + | PostgresCommands::ResetPassword { .. } + | PostgresCommands::Restore { .. } + | PostgresCommands::Restart { .. } + | PostgresCommands::Promote { .. } + | PostgresCommands::Switchover { .. } => true, + PostgresCommands::Config(ConfigCommands::Replace { .. }) + | PostgresCommands::Config(ConfigCommands::Patch { .. }) => true, + PostgresCommands::ReadReplica(ReadReplicaCommands::Create { .. 
}) => true, + } + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn unwrap_api<T>(resp: ApiResponse<T>) -> Result<T, Box<dyn std::error::Error>> { + resp.result + .ok_or_else(|| "API response was missing a result body".into()) +} + +fn parse_pg_size<T: DeserializeOwned>(value: &str) -> Result<T, Box<dyn std::error::Error>> { + serde_json::from_value(serde_json::Value::String(value.to_string())) + .map_err(|e| format!("invalid size '{}': {}", value, e).into()) +} + +fn load_json_file<T: DeserializeOwned>(path: &Path) -> Result<T, Box<dyn std::error::Error>> { + let contents = std::fs::read_to_string(path) + .map_err(|e| format!("failed to read {}: {}", path.display(), e))?; + serde_json::from_str(&contents) + .map_err(|e| format!("failed to parse {} as JSON: {}", path.display(), e).into()) +} + +/// Parse `--set key=value` overrides into a JSON object. +/// +/// Each value is parsed as JSON first (so `max_connections=500` becomes a number), +/// falling back to a string if JSON parsing fails (`statement_timeout=5s`). +pub(super) fn parse_pg_config_overrides( + sets: &[String], +) -> Result<serde_json::Map<String, serde_json::Value>, Box<dyn std::error::Error>> { + let mut out = serde_json::Map::new(); + for entry in sets { + let (key, val) = entry + .split_once('=') + .ok_or_else(|| format!("invalid --set '{}': expected key=value", entry))?; + let key = key.trim(); + if key.is_empty() { + return Err(format!("invalid --set '{}': key cannot be empty", entry).into()); + } + let parsed = serde_json::from_str::<serde_json::Value>(val) + .unwrap_or_else(|_| serde_json::Value::String(val.to_string())); + out.insert(key.to_string(), parsed); + } + Ok(out) +} + +fn generate_compliant_password() -> String { + // Two UUIDv4s give 64 cryptographically-random hex chars (lowercase + digits). + // Prefix "A1" ensures uppercase + digit presence; overall length 66, min-12 satisfied. 
+ let u1 = uuid::Uuid::new_v4().simple().to_string(); + let u2 = uuid::Uuid::new_v4().simple().to_string(); + format!("A1{}{}", u1, u2) +} + +fn validate_password(pw: &str) -> Result<(), Box<dyn std::error::Error>> { + if pw.len() < 12 { + return Err("password must be at least 12 characters".into()); + } + let has_lower = pw.chars().any(|c| c.is_ascii_lowercase()); + let has_upper = pw.chars().any(|c| c.is_ascii_uppercase()); + let has_digit = pw.chars().any(|c| c.is_ascii_digit()); + if !(has_lower && has_upper && has_digit) { + return Err( + "password must include at least one lowercase, one uppercase, and one digit".into(), + ); + } + Ok(()) +} + +fn write_pem_file(path: &Path, pem: &str) -> Result<(), Box<dyn std::error::Error>> { + use std::io::Write; + #[cfg(unix)] + { + use std::os::unix::fs::OpenOptionsExt; + let mut f = std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .mode(0o600) + .open(path)?; + f.write_all(pem.as_bytes())?; + } + #[cfg(not(unix))] + { + let mut f = std::fs::File::create(path)?; + f.write_all(pem.as_bytes())?; + } + Ok(()) +} + +fn apply_filter(item: &PostgresServiceListItem, filters: &[String]) -> bool { + for filter in filters { + let Some((key, val)) = filter.split_once('=') else { + continue; + }; + let matches = match key.trim() { + "state" => format!("{:?}", item.state).eq_ignore_ascii_case(val), + "region" => item.region == val, + "name" => item.name == val, + "provider" => format!("{:?}", item.provider).eq_ignore_ascii_case(val), + _ => true, + }; + if !matches { + return false; + } + } + true +} + +fn state_label(s: &clickhouse_cloud_api::models::PgStateProperty) -> String { + serde_json::to_value(s) + .ok() + .and_then(|v| v.as_str().map(|s| s.to_string())) + .unwrap_or_else(|| format!("{:?}", s)) +} + +fn enum_label<T: serde::Serialize>(v: &T) -> String { + serde_json::to_value(v) + .ok() + .and_then(|v| v.as_str().map(|s| s.to_string())) + .unwrap_or_default() +} + +fn render_postgres_service(svc: &PostgresService) { + println!(" ID: {}", svc.id); + println!(" 
Name: {}", svc.name); + println!(" State: {}", state_label(&svc.state)); + println!(" Provider: {}", enum_label(&svc.provider)); + println!(" Region: {}", svc.region); + println!(" Size: {}", enum_label(&svc.size)); + println!(" Storage (GB): {}", svc.storage_size); + println!(" PG version: {}", enum_label(&svc.postgres_version)); + println!(" HA type: {}", enum_label(&svc.ha_type)); + println!(" Primary: {}", svc.is_primary); + println!(" Host: {}", svc.hostname); + println!(" Username: {}", svc.username); + println!(" Created: {}", svc.created_at.to_rfc3339()); + if !svc.tags.is_empty() { + let tags: Vec = svc + .tags + .iter() + .map(|t| match &t.value { + Some(v) => format!("{}={}", t.key, v), + None => t.key.clone(), + }) + .collect(); + println!(" Tags: {}", tags.join(", ")); + } +} + +fn merge_tags( + existing: &[ResourceTagsV1], + add: &[ResourceTagsV1], + remove_keys: &[String], +) -> Vec { + let remove: std::collections::HashSet<&str> = remove_keys.iter().map(|s| s.as_str()).collect(); + let add_keys: std::collections::HashSet<&str> = add.iter().map(|t| t.key.as_str()).collect(); + + let mut merged: Vec = existing + .iter() + .filter(|t| !remove.contains(t.key.as_str()) && !add_keys.contains(t.key.as_str())) + .cloned() + .collect(); + merged.extend(add.iter().cloned()); + merged +} + +// --------------------------------------------------------------------------- +// Option structs (for commands with many args) +// --------------------------------------------------------------------------- + +pub struct PostgresCreateOptions<'a> { + pub name: &'a str, + pub region: &'a str, + pub size: &'a str, + pub storage_gb: i64, + pub provider: &'a str, + pub pg_version: Option<&'a str>, + pub ha_type: Option<&'a str>, + pub tags: &'a [String], + pub pg_config_file: Option<&'a Path>, + pub pg_bouncer_config_file: Option<&'a Path>, + pub org_id: Option<&'a str>, +} + +pub struct PostgresUpdateOptions<'a> { + pub name: Option<&'a str>, + pub region: Option<&'a str>, + 
pub size: Option<&'a str>, + pub storage_gb: Option, + pub provider: Option<&'a str>, + pub pg_version: Option<&'a str>, + pub ha_type: Option<&'a str>, + pub add_tag: &'a [String], + pub remove_tag: &'a [String], + pub org_id: Option<&'a str>, +} + +pub struct PostgresReadReplicaOptions<'a> { + pub name: &'a str, + pub tags: &'a [String], + pub pg_config_file: Option<&'a Path>, + pub pg_bouncer_config_file: Option<&'a Path>, + pub org_id: Option<&'a str>, +} + +pub struct PostgresRestoreOptions<'a> { + pub name: &'a str, + pub restore_target: &'a str, + pub tags: &'a [String], + pub pg_config_file: Option<&'a Path>, + pub pg_bouncer_config_file: Option<&'a Path>, + pub org_id: Option<&'a str>, +} + +// --------------------------------------------------------------------------- +// Handlers +// --------------------------------------------------------------------------- + +pub async fn postgres_list( + client: &CloudClient, + org_id: Option<&str>, + filters: &[String], + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + let resp = client + .api() + .postgres_service_get_list(&org_id) + .await + .map_err(|e| client.convert_error(e))?; + let items = unwrap_api(resp)?; + let filtered: Vec = items + .into_iter() + .filter(|i| apply_filter(i, filters)) + .collect(); + + if json { + println!("{}", serde_json::to_string_pretty(&filtered)?); + return Ok(()); + } + + if filtered.is_empty() { + println!("No Postgres services found"); + return Ok(()); + } + + #[derive(Tabled)] + struct Row { + #[tabled(rename = "Name")] + name: String, + #[tabled(rename = "ID")] + id: String, + #[tabled(rename = "State")] + state: String, + #[tabled(rename = "Region")] + region: String, + #[tabled(rename = "Size")] + size: String, + #[tabled(rename = "PG")] + pg: String, + #[tabled(rename = "HA")] + ha: String, + #[tabled(rename = "Primary")] + primary: String, + } + + let rows: Vec = filtered + .into_iter() + .map(|i| Row { + name: i.name.clone(), + 
id: i.id.to_string(), + state: state_label(&i.state), + region: i.region.clone(), + size: enum_label(&i.size), + pg: enum_label(&i.postgres_version), + ha: enum_label(&i.ha_type), + primary: if i.is_primary { "yes" } else { "no" }.to_string(), + }) + .collect(); + + println!("{}", Table::new(rows).with(Style::rounded())); + Ok(()) +} + +pub async fn postgres_get( + client: &CloudClient, + postgres_id: &str, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + let resp = client + .api() + .postgres_service_get(&org_id, postgres_id) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + render_postgres_service(&svc); + if !svc.connection_string.is_empty() { + println!(" Connection string: {}", svc.connection_string); + } + } + Ok(()) +} + +pub async fn postgres_create( + client: &CloudClient, + opts: PostgresCreateOptions<'_>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, opts.org_id).await?; + + let provider: PgProvider = parse_serde_enum(opts.provider, "provider", KNOWN_PG_PROVIDERS)?; + let size = parse_pg_size(opts.size)?; + let pg_version: Option = opts + .pg_version + .map(|v| parse_serde_enum(v, "pg-version", KNOWN_PG_VERSIONS)) + .transpose()?; + let ha_type: Option = opts + .ha_type + .map(|v| parse_serde_enum(v, "ha-type", KNOWN_PG_HA_TYPES)) + .transpose()?; + let tags = parse_tags(opts.tags)?; + let pg_config = opts + .pg_config_file + .map(load_json_file::) + .transpose()?; + let pg_bouncer_config = opts + .pg_bouncer_config_file + .map(load_json_file::) + .transpose()?; + + let req = PostgresServicePostRequest { + name: opts.name.to_string(), + provider, + region: opts.region.to_string(), + size, + storage_size: opts.storage_gb, + postgres_version: pg_version, + ha_type, + tags, + pg_config, + pg_bouncer_config, + }; + + let resp = client + .api() + 
.postgres_service_create(&org_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + println!("Postgres service created"); + println!(); + render_postgres_service(&svc); + println!(); + println!("Credentials (save these — password shown only once):"); + println!(" Username: {}", svc.username); + println!(" Password: {}", svc.password); + if !svc.connection_string.is_empty() { + println!(" Connection string: {}", svc.connection_string); + } + } + Ok(()) +} + +pub async fn postgres_update( + client: &CloudClient, + postgres_id: &str, + opts: PostgresUpdateOptions<'_>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, opts.org_id).await?; + + let provider = opts + .provider + .map(|v| parse_serde_enum::(v, "provider", KNOWN_PG_PROVIDERS)) + .transpose()?; + let size = opts.size.map(parse_pg_size).transpose()?; + let pg_version = opts + .pg_version + .map(|v| parse_serde_enum::(v, "pg-version", KNOWN_PG_VERSIONS)) + .transpose()?; + let ha_type = opts + .ha_type + .map(|v| parse_serde_enum::(v, "ha-type", KNOWN_PG_HA_TYPES)) + .transpose()?; + + // Merge tag add/remove against current tags if any tag changes requested. 
+ let tags = if !opts.add_tag.is_empty() || !opts.remove_tag.is_empty() { + let current = client + .api() + .postgres_service_get(&org_id, postgres_id) + .await + .map_err(|e| client.convert_error(e))?; + let current = unwrap_api(current)?; + let add = parse_tags(opts.add_tag)?.unwrap_or_default(); + Some(merge_tags(&current.tags, &add, opts.remove_tag)) + } else { + None + }; + + let req = PostgresServicePatchRequest { + name: opts.name.map(|s| s.to_string()), + provider, + region: opts.region.map(|s| s.to_string()), + size, + storage_size: opts.storage_gb, + postgres_version: pg_version, + ha_type, + tags, + }; + + let resp = client + .api() + .postgres_service_patch(&org_id, postgres_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + println!("Postgres service updated"); + println!(); + render_postgres_service(&svc); + } + Ok(()) +} + +pub async fn postgres_delete( + client: &CloudClient, + postgres_id: &str, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box<dyn std::error::Error>> { + let org_id = resolve_org_id(client, org_id).await?; + let resp = client + .api() + .postgres_service_delete(&org_id, postgres_id) + .await + .map_err(|e| client.convert_error(e))?; + + if json { + println!("{}", serde_json::to_string_pretty(&resp)?); + } else { + println!("Postgres service {} deletion initiated", postgres_id); + } + Ok(()) +} + +pub async fn postgres_certs_get( + client: &CloudClient, + postgres_id: &str, + output: Option<&Path>, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box<dyn std::error::Error>> { + let org_id = resolve_org_id(client, org_id).await?; + let pem = client + .api() + .postgres_service_certs_get(&org_id, postgres_id) + .await + .map_err(|e| client.convert_error(e))?; + + if let Some(path) = output { + write_pem_file(path, &pem)?; + if json { + println!( + "{}", + serde_json::to_string_pretty(&serde_json::json!({ + "path": path.display().to_string(), + }))? 
+ ); + } else { + println!("Wrote CA certificate to {}", path.display()); + } + return Ok(()); + } + + if json { + println!( + "{}", + serde_json::to_string_pretty(&serde_json::json!({ "certificate": pem }))? + ); + } else { + print!("{}", pem); + if !pem.ends_with('\n') { + println!(); + } + } + Ok(()) +} + +pub async fn postgres_config_get( + client: &CloudClient, + postgres_id: &str, + org_id: Option<&str>, + _json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + let resp = client + .api() + .postgres_instance_config_get(&org_id, postgres_id) + .await + .map_err(|e| client.convert_error(e))?; + let cfg = unwrap_api(resp)?; + // Config is a flat 20+ field object — always emit as JSON (pretty). + println!("{}", serde_json::to_string_pretty(&cfg)?); + Ok(()) +} + +pub async fn postgres_config_replace( + client: &CloudClient, + postgres_id: &str, + file: &Path, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + let cfg: PostgresInstanceConfig = load_json_file(file)?; + let resp = client + .api() + .postgres_instance_config_post(&org_id, postgres_id, &cfg) + .await + .map_err(|e| client.convert_error(e))?; + let out = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&out)?); + } else { + println!("Configuration replaced"); + if let Some(msg) = &out.message { + println!("Note: {}", msg); + } + } + Ok(()) +} + +pub async fn postgres_config_patch( + client: &CloudClient, + postgres_id: &str, + sets: &[String], + file: Option<&Path>, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + + if sets.is_empty() && file.is_none() { + return Err("provide --set key=value... or --file PATH".into()); + } + + let cfg: PostgresInstanceConfig = if let Some(path) = file { + load_json_file(path)? 
+ } else { + // Build a PostgresInstanceConfig from --set entries by constructing + // a JSON object { "pgConfig": { ... overrides ... }, "pgBouncerConfig": {} } + // and deserializing, which merges with #[serde(default)] field defaults. + let overrides = parse_pg_config_overrides(sets)?; + let wrapper = serde_json::json!({ + "pgConfig": serde_json::Value::Object(overrides), + "pgBouncerConfig": {}, + }); + serde_json::from_value(wrapper) + .map_err(|e| format!("failed to build config from --set entries: {}", e))? + }; + + let resp = client + .api() + .postgres_instance_config_patch(&org_id, postgres_id, &cfg) + .await + .map_err(|e| client.convert_error(e))?; + let out = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&out)?); + } else { + println!("Configuration patched"); + if let Some(msg) = &out.message { + println!("Note: {}", msg); + } + } + Ok(()) +} + +pub async fn postgres_reset_password( + client: &CloudClient, + postgres_id: &str, + password: Option<&str>, + generate: bool, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + + let pw = match (password, generate) { + (Some(p), false) => { + validate_password(p)?; + p.to_string() + } + (None, true) => generate_compliant_password(), + (None, false) => return Err("provide --password VALUE or --generate".into()), + (Some(_), true) => unreachable!("clap conflicts_with prevents this"), + }; + + let req = PostgresServiceSetPassword { + password: pw.clone(), + }; + let resp = client + .api() + .postgres_service_set_password(&org_id, postgres_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let out = unwrap_api(resp)?; + + if json { + // Return the password that was set (the API also echoes it back, but always + // emit what the user now needs to use). + println!( + "{}", + serde_json::to_string_pretty(&serde_json::json!({ + "password": out.password, + }))? 
+ ); + } else { + println!("Password reset successfully"); + if generate { + println!(); + println!("Generated password (save this — not recoverable):"); + println!(" {}", out.password); + } + } + Ok(()) +} + +pub async fn postgres_read_replica_create( + client: &CloudClient, + postgres_id: &str, + opts: PostgresReadReplicaOptions<'_>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, opts.org_id).await?; + let tags = parse_tags(opts.tags)?; + let pg_config = opts + .pg_config_file + .map(load_json_file::) + .transpose()?; + let pg_bouncer_config = opts + .pg_bouncer_config_file + .map(load_json_file::) + .transpose()?; + + let req = PostgresServiceReadReplicaRequest { + name: opts.name.to_string(), + tags, + pg_config, + pg_bouncer_config, + }; + + let resp = client + .api() + .postgres_instance_create_read_replica(&org_id, postgres_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + println!("Read replica created"); + println!(); + render_postgres_service(&svc); + } + Ok(()) +} + +pub async fn postgres_restore( + client: &CloudClient, + postgres_id: &str, + opts: PostgresRestoreOptions<'_>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, opts.org_id).await?; + let tags = parse_tags(opts.tags)?; + let pg_config = opts + .pg_config_file + .map(load_json_file::) + .transpose()?; + let pg_bouncer_config = opts + .pg_bouncer_config_file + .map(load_json_file::) + .transpose()?; + let restore_target = chrono::DateTime::parse_from_rfc3339(opts.restore_target) + .map_err(|e| format!("invalid restore-target: {}", e))? 
+ .with_timezone(&chrono::Utc); + + let req = PostgresServiceRestoreRequest { + name: opts.name.to_string(), + restore_target, + tags, + pg_config, + pg_bouncer_config, + }; + + let resp = client + .api() + .postgres_instance_restore(&org_id, postgres_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + println!("Postgres service restore initiated"); + println!(); + render_postgres_service(&svc); + } + Ok(()) +} + +pub async fn postgres_state_change( + client: &CloudClient, + postgres_id: &str, + cmd: PostgresServiceSetStateCommand, + org_id: Option<&str>, + json: bool, +) -> Result<(), Box> { + let org_id = resolve_org_id(client, org_id).await?; + let req = PostgresServiceSetState { command: cmd }; + let resp = client + .api() + .postgres_service_patch_state(&org_id, postgres_id, &req) + .await + .map_err(|e| client.convert_error(e))?; + let svc = unwrap_api(resp)?; + + if json { + println!("{}", serde_json::to_string_pretty(&svc)?); + } else { + println!("State change accepted"); + println!(); + render_postgres_service(&svc); + } + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests (CLI parsing + helpers) +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{Cli, Commands}; + use crate::cloud::cli::CloudCommands; + use clap::Parser; + + fn parse_cloud(args: &[&str]) -> CloudCommands { + let cli = Cli::try_parse_from(args).expect("parse"); + match cli.command { + Commands::Cloud(a) => a.command, + _ => panic!("expected cloud command"), + } + } + + fn parse_postgres(args: &[&str]) -> PostgresCommands { + match parse_cloud(args) { + CloudCommands::Postgres { command } => command, + _ => panic!("expected postgres command"), + } + } + + #[test] + fn parses_postgres_list_with_filters() { + let cmd = 
parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "list", + "--filter", "state=running", + "--filter", "region=us-east-1", + ]); + let PostgresCommands::List { filter, .. } = cmd else { + panic!("expected list"); + }; + assert_eq!(filter, vec!["state=running", "region=us-east-1"]); + } + + #[test] + fn parses_postgres_get() { + let cmd = parse_postgres(&["clickhousectl", "cloud", "postgres", "get", "pg-1"]); + let PostgresCommands::Get { postgres_id, .. } = cmd else { + panic!("expected get"); + }; + assert_eq!(postgres_id, "pg-1"); + } + + #[test] + fn parses_postgres_create_minimal() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "create", + "--name", "pg1", + "--region", "us-east-1", + "--size", "m7i.2xlarge", + "--storage-gb", "100", + ]); + let PostgresCommands::Create { + name, region, size, storage_gb, provider, pg_version, ha_type, .. + } = cmd + else { + panic!("expected create"); + }; + assert_eq!(name, "pg1"); + assert_eq!(region, "us-east-1"); + assert_eq!(size, "m7i.2xlarge"); + assert_eq!(storage_gb, 100); + assert_eq!(provider, "aws"); + assert!(pg_version.is_none()); + assert!(ha_type.is_none()); + } + + #[test] + fn parses_postgres_create_with_all_flags() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "create", + "--name", "pg1", + "--region", "us-east-1", + "--size", "m7i.2xlarge", + "--storage-gb", "100", + "--pg-version", "17", + "--ha-type", "sync", + "--tag", "env=prod", + "--tag", "owner=data", + ]); + let PostgresCommands::Create { pg_version, ha_type, tag, .. 
} = cmd else { + panic!("expected create"); + }; + assert_eq!(pg_version.as_deref(), Some("17")); + assert_eq!(ha_type.as_deref(), Some("sync")); + assert_eq!(tag, vec!["env=prod", "owner=data"]); + } + + #[test] + fn rejects_postgres_create_missing_required() { + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "create", + "--name", "pg1", + "--region", "us-east-1", + // missing --size and --storage-gb + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("--size") || err.to_string().contains("--storage-gb")); + } + + #[test] + fn rejects_postgres_create_invalid_pg_version() { + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "create", + "--name", "pg1", + "--region", "us-east-1", + "--size", "m7i.2xlarge", + "--storage-gb", "100", + "--pg-version", "15", + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("invalid value")); + } + + #[test] + fn parses_postgres_update_tag_diff_flags() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "update", "pg-1", + "--name", "renamed", + "--add-tag", "env=prod", + "--add-tag", "team=data", + "--remove-tag", "old", + ]); + let PostgresCommands::Update { + postgres_id, name, add_tag, remove_tag, .. + } = cmd + else { + panic!("expected update"); + }; + assert_eq!(postgres_id, "pg-1"); + assert_eq!(name.as_deref(), Some("renamed")); + assert_eq!(add_tag, vec!["env=prod", "team=data"]); + assert_eq!(remove_tag, vec!["old"]); + } + + #[test] + fn parses_postgres_update_no_fields() { + let cmd = parse_postgres(&["clickhousectl", "cloud", "postgres", "update", "pg-1"]); + let PostgresCommands::Update { postgres_id, name, .. 
} = cmd else { + panic!("expected update"); + }; + assert_eq!(postgres_id, "pg-1"); + assert!(name.is_none()); + } + + #[test] + fn parses_postgres_delete() { + let cmd = parse_postgres(&["clickhousectl", "cloud", "postgres", "delete", "pg-1"]); + let PostgresCommands::Delete { postgres_id, .. } = cmd else { + panic!("expected delete"); + }; + assert_eq!(postgres_id, "pg-1"); + } + + #[test] + fn parses_postgres_certs_get_stdout_and_output() { + let cmd = parse_postgres(&["clickhousectl", "cloud", "postgres", "certs", "get", "pg-1"]); + let PostgresCommands::Certs(CertsCommands::Get { output, .. }) = cmd else { + panic!("expected certs get"); + }; + assert!(output.is_none()); + + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "certs", "get", "pg-1", + "--output", "/tmp/ca.pem", + ]); + let PostgresCommands::Certs(CertsCommands::Get { output, .. }) = cmd else { + panic!("expected certs get"); + }; + assert_eq!(output, Some(PathBuf::from("/tmp/ca.pem"))); + } + + #[test] + fn parses_postgres_config_get() { + let cmd = parse_postgres(&["clickhousectl", "cloud", "postgres", "config", "get", "pg-1"]); + assert!(matches!( + cmd, + PostgresCommands::Config(ConfigCommands::Get { .. }) + )); + } + + #[test] + fn parses_postgres_config_replace_requires_file() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "config", "replace", "pg-1", + "--file", "/tmp/cfg.json", + ]); + let PostgresCommands::Config(ConfigCommands::Replace { file, .. 
}) = cmd else { + panic!("expected replace"); + }; + assert_eq!(file, PathBuf::from("/tmp/cfg.json")); + + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "config", "replace", "pg-1", + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("--file")); + } + + #[test] + fn parses_postgres_config_patch_with_set_entries() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "config", "patch", "pg-1", + "--set", "max_connections=500", + "--set", "random_page_cost=1.1", + ]); + let PostgresCommands::Config(ConfigCommands::Patch { sets, file, .. }) = cmd else { + panic!("expected patch"); + }; + assert_eq!(sets, vec!["max_connections=500", "random_page_cost=1.1"]); + assert!(file.is_none()); + } + + #[test] + fn parses_postgres_config_patch_with_file() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "config", "patch", "pg-1", + "--file", "/tmp/p.json", + ]); + let PostgresCommands::Config(ConfigCommands::Patch { sets, file, .. }) = cmd else { + panic!("expected patch"); + }; + assert!(sets.is_empty()); + assert_eq!(file, Some(PathBuf::from("/tmp/p.json"))); + } + + #[test] + fn rejects_postgres_config_patch_set_and_file_together() { + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "config", "patch", "pg-1", + "--set", "max_connections=500", + "--file", "/tmp/p.json", + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("cannot be used")); + } + + #[test] + fn parses_postgres_reset_password_with_password_and_generate() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "reset-password", "pg-1", + "--password", "Hunter2345678", + ]); + let PostgresCommands::ResetPassword { password, generate, .. 
} = cmd else { + panic!("expected reset-password"); + }; + assert_eq!(password.as_deref(), Some("Hunter2345678")); + assert!(!generate); + + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "reset-password", "pg-1", + "--generate", + ]); + let PostgresCommands::ResetPassword { password, generate, .. } = cmd else { + panic!("expected reset-password"); + }; + assert!(password.is_none()); + assert!(generate); + } + + #[test] + fn rejects_postgres_reset_password_both() { + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "reset-password", "pg-1", + "--password", "abc", + "--generate", + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("cannot be used")); + } + + #[test] + fn parses_postgres_restore_valid_rfc3339() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "restore", "pg-1", + "--name", "restored", + "--restore-target", "2026-04-16T12:00:00Z", + ]); + let PostgresCommands::Restore { name, restore_target, .. } = cmd else { + panic!("expected restore"); + }; + assert_eq!(name, "restored"); + assert_eq!(restore_target, "2026-04-16T12:00:00Z"); + } + + #[test] + fn rejects_postgres_restore_invalid_datetime() { + let err = Cli::try_parse_from([ + "clickhousectl", "cloud", "postgres", "restore", "pg-1", + "--name", "restored", + "--restore-target", "yesterday", + ]) + .err().expect("expected parse error"); + assert!(err.to_string().contains("invalid datetime")); + } + + #[test] + fn parses_postgres_read_replica_create() { + let cmd = parse_postgres(&[ + "clickhousectl", "cloud", "postgres", "read-replica", "create", "pg-1", + "--name", "replica1", + "--tag", "role=read", + ]); + let PostgresCommands::ReadReplica(ReadReplicaCommands::Create { postgres_id, name, tag, .. 
}) = cmd else { + panic!("expected read-replica create"); + }; + assert_eq!(postgres_id, "pg-1"); + assert_eq!(name, "replica1"); + assert_eq!(tag, vec!["role=read"]); + } + + #[test] + fn parses_postgres_restart_promote_switchover() { + assert!(matches!( + parse_postgres(&["clickhousectl", "cloud", "postgres", "restart", "pg-1"]), + PostgresCommands::Restart { .. } + )); + assert!(matches!( + parse_postgres(&["clickhousectl", "cloud", "postgres", "promote", "pg-1"]), + PostgresCommands::Promote { .. } + )); + assert!(matches!( + parse_postgres(&["clickhousectl", "cloud", "postgres", "switchover", "pg-1"]), + PostgresCommands::Switchover { .. } + )); + } + + // --- helper unit tests --- + + #[test] + fn parse_pg_config_overrides_numeric_and_string() { + let m = parse_pg_config_overrides(&[ + "max_connections=500".into(), + "random_page_cost=1.1".into(), + "statement_timeout=5s".into(), + ]) + .unwrap(); + assert_eq!(m.get("max_connections"), Some(&serde_json::json!(500))); + assert_eq!(m.get("random_page_cost"), Some(&serde_json::json!(1.1))); + assert_eq!( + m.get("statement_timeout"), + Some(&serde_json::Value::String("5s".to_string())) + ); + } + + #[test] + fn parse_pg_config_overrides_rejects_malformed() { + assert!(parse_pg_config_overrides(&["no_equals".into()]).is_err()); + assert!(parse_pg_config_overrides(&["=value".into()]).is_err()); + } + + #[test] + fn parse_pg_config_overrides_last_wins_on_duplicates() { + let m = parse_pg_config_overrides(&[ + "max_connections=100".into(), + "max_connections=200".into(), + ]) + .unwrap(); + assert_eq!(m.get("max_connections"), Some(&serde_json::json!(200))); + } + + #[test] + fn validate_password_rules() { + assert!(validate_password("Short1").is_err()); + assert!(validate_password("alllowercase12345").is_err()); // no upper + assert!(validate_password("ALLUPPERCASE12345").is_err()); // no lower + assert!(validate_password("NoDigitsHereAtAll").is_err()); + assert!(validate_password("Valid1Password").is_ok()); + } + 
+ #[test] + fn generated_password_is_compliant() { + let pw = generate_compliant_password(); + assert!(validate_password(&pw).is_ok()); + } + + #[test] + fn merge_tags_adds_and_removes() { + let existing = vec![ + ResourceTagsV1 { + key: "env".into(), + value: Some("dev".into()), + }, + ResourceTagsV1 { + key: "team".into(), + value: Some("data".into()), + }, + ]; + let add = vec![ResourceTagsV1 { + key: "env".into(), + value: Some("prod".into()), + }]; + let remove = vec!["team".to_string()]; + let out = merge_tags(&existing, &add, &remove); + assert_eq!(out.len(), 1); + assert_eq!(out[0].key, "env"); + assert_eq!(out[0].value.as_deref(), Some("prod")); + } +} diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs index 19185ea..14df9c8 100644 --- a/crates/clickhousectl/src/main.rs +++ b/crates/clickhousectl/src/main.rs @@ -13,6 +13,7 @@ use clap::Parser; use cli::{ ActivityCommands, AuthCommands, BackupCommands, BackupConfigCommands, Cli, CloudArgs, CloudCommands, Commands, InvitationCommands, KeyCommands, MemberCommands, OrgCommands, + PostgresCertsCommands, PostgresCommands, PostgresConfigCommands, PostgresReadReplicaCommands, PrivateEndpointCommands, QueryEndpointCommands, ServiceCommands, SkillsArgs, UpdateArgs, }; use clap::error::ErrorKind; @@ -796,7 +797,221 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { .await } }, + CloudCommands::Postgres { command } => run_postgres(&client, command, json).await, }; result.map_err(|e| Error::Cloud(e.to_string())) } + +async fn run_postgres( + client: &CloudClient, + command: PostgresCommands, + json: bool, +) -> std::result::Result<(), Box> { + use clickhouse_cloud_api::models::PostgresServiceSetStateCommand; + use cloud::postgres::{ + self as pg, PostgresCreateOptions, PostgresReadReplicaOptions, PostgresRestoreOptions, + PostgresUpdateOptions, + }; + + match command { + PostgresCommands::List { org_id, filter } => { + pg::postgres_list(client, org_id.as_deref(), &filter, json).await + 
} + PostgresCommands::Get { + postgres_id, + org_id, + } => pg::postgres_get(client, &postgres_id, org_id.as_deref(), json).await, + PostgresCommands::Create { + name, + region, + size, + storage_gb, + provider, + pg_version, + ha_type, + tag, + pg_config_file, + pg_bouncer_config_file, + org_id, + } => { + let opts = PostgresCreateOptions { + name: &name, + region: ®ion, + size: &size, + storage_gb, + provider: &provider, + pg_version: pg_version.as_deref(), + ha_type: ha_type.as_deref(), + tags: &tag, + pg_config_file: pg_config_file.as_deref(), + pg_bouncer_config_file: pg_bouncer_config_file.as_deref(), + org_id: org_id.as_deref(), + }; + pg::postgres_create(client, opts, json).await + } + PostgresCommands::Update { + postgres_id, + name, + region, + size, + storage_gb, + provider, + pg_version, + ha_type, + add_tag, + remove_tag, + org_id, + } => { + let opts = PostgresUpdateOptions { + name: name.as_deref(), + region: region.as_deref(), + size: size.as_deref(), + storage_gb, + provider: provider.as_deref(), + pg_version: pg_version.as_deref(), + ha_type: ha_type.as_deref(), + add_tag: &add_tag, + remove_tag: &remove_tag, + org_id: org_id.as_deref(), + }; + pg::postgres_update(client, &postgres_id, opts, json).await + } + PostgresCommands::Delete { + postgres_id, + org_id, + } => pg::postgres_delete(client, &postgres_id, org_id.as_deref(), json).await, + PostgresCommands::Certs(PostgresCertsCommands::Get { + postgres_id, + output, + org_id, + }) => { + pg::postgres_certs_get( + client, + &postgres_id, + output.as_deref(), + org_id.as_deref(), + json, + ) + .await + } + PostgresCommands::Config(PostgresConfigCommands::Get { + postgres_id, + org_id, + }) => pg::postgres_config_get(client, &postgres_id, org_id.as_deref(), json).await, + PostgresCommands::Config(PostgresConfigCommands::Replace { + postgres_id, + file, + org_id, + }) => { + pg::postgres_config_replace(client, &postgres_id, &file, org_id.as_deref(), json).await + } + 
PostgresCommands::Config(PostgresConfigCommands::Patch { + postgres_id, + sets, + file, + org_id, + }) => { + pg::postgres_config_patch( + client, + &postgres_id, + &sets, + file.as_deref(), + org_id.as_deref(), + json, + ) + .await + } + PostgresCommands::ResetPassword { + postgres_id, + password, + generate, + org_id, + } => { + pg::postgres_reset_password( + client, + &postgres_id, + password.as_deref(), + generate, + org_id.as_deref(), + json, + ) + .await + } + PostgresCommands::ReadReplica(PostgresReadReplicaCommands::Create { + postgres_id, + name, + tag, + pg_config_file, + pg_bouncer_config_file, + org_id, + }) => { + let opts = PostgresReadReplicaOptions { + name: &name, + tags: &tag, + pg_config_file: pg_config_file.as_deref(), + pg_bouncer_config_file: pg_bouncer_config_file.as_deref(), + org_id: org_id.as_deref(), + }; + pg::postgres_read_replica_create(client, &postgres_id, opts, json).await + } + PostgresCommands::Restore { + postgres_id, + name, + restore_target, + tag, + pg_config_file, + pg_bouncer_config_file, + org_id, + } => { + let opts = PostgresRestoreOptions { + name: &name, + restore_target: &restore_target, + tags: &tag, + pg_config_file: pg_config_file.as_deref(), + pg_bouncer_config_file: pg_bouncer_config_file.as_deref(), + org_id: org_id.as_deref(), + }; + pg::postgres_restore(client, &postgres_id, opts, json).await + } + PostgresCommands::Restart { + postgres_id, + org_id, + } => { + pg::postgres_state_change( + client, + &postgres_id, + PostgresServiceSetStateCommand::Restart, + org_id.as_deref(), + json, + ) + .await + } + PostgresCommands::Promote { + postgres_id, + org_id, + } => { + pg::postgres_state_change( + client, + &postgres_id, + PostgresServiceSetStateCommand::Promote, + org_id.as_deref(), + json, + ) + .await + } + PostgresCommands::Switchover { + postgres_id, + org_id, + } => { + pg::postgres_state_change( + client, + &postgres_id, + PostgresServiceSetStateCommand::Switchover, + org_id.as_deref(), + json, + ) + .await 
+ } + } +} From 2219dfb9b2355fbf30ab52f4c0dda927ba4c4ca7 Mon Sep 17 00:00:00 2001 From: sdairs Date: Fri, 17 Apr 2026 14:42:21 +0100 Subject: [PATCH 03/12] Add cloud Postgres integration test suite Mirrors the ClickHouse service lifecycle test (create, wait-running, list, certs, config get, PATCH tags, password reset, restart, delete) against the Postgres endpoints, wired into the scheduled Cloud Integration workflow. Password step treats a successful 200 as the pass condition: per the OpenAPI spec, PostgresServicePasswordResource.password is only populated when the request omits `password`, so the supplied-password path returns empty by design. Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/workflows/cloud-integration.yml | 3 + crates/clickhouse-cloud-api/README.md | 9 + .../tests/integration/mod.rs | 2 + .../tests/integration/support.rs | 90 ++++ .../tests/integration_postgres_test.rs | 447 ++++++++++++++++++ 5 files changed, 551 insertions(+) create mode 100644 crates/clickhouse-cloud-api/tests/integration_postgres_test.rs diff --git a/.github/workflows/cloud-integration.yml b/.github/workflows/cloud-integration.yml index 3cbe364..9aa7d93 100644 --- a/.github/workflows/cloud-integration.yml +++ b/.github/workflows/cloud-integration.yml @@ -42,3 +42,6 @@ jobs: - name: Run cloud integration suite run: cargo test -p clickhouse-cloud-api --test integration_test -- --ignored --nocapture + + - name: Run cloud Postgres integration suite + run: cargo test -p clickhouse-cloud-api --test integration_postgres_test -- --ignored --nocapture diff --git a/crates/clickhouse-cloud-api/README.md b/crates/clickhouse-cloud-api/README.md index d2a71cd..0db6cff 100644 --- a/crates/clickhouse-cloud-api/README.md +++ b/crates/clickhouse-cloud-api/README.md @@ -67,6 +67,15 @@ cargo test --test client_test # wiremock-based client tests cargo test --test models_test # serde round-trip tests ``` +Live-API lifecycle suites are `#[ignore]`d by default (they provision real 
resources): + +```bash +cargo test --test integration_test -- --ignored --nocapture # ClickHouse service CRUD +cargo test --test integration_postgres_test -- --ignored --nocapture # Postgres service CRUD +``` + +Both require `CLICKHOUSE_CLOUD_API_KEY`, `CLICKHOUSE_CLOUD_API_SECRET`, `CLICKHOUSE_CLOUD_TEST_ORG_ID`, `CLICKHOUSE_CLOUD_TEST_PROVIDER`, and `CLICKHOUSE_CLOUD_TEST_REGION` in the environment, and are wired into the scheduled `Cloud Integration` GitHub Actions workflow. + The `spec_coverage_test` suite checks three things against the checked-in spec: 1. Every OpenAPI operation has a matching `pub async fn` in `client.rs` diff --git a/crates/clickhouse-cloud-api/tests/integration/mod.rs b/crates/clickhouse-cloud-api/tests/integration/mod.rs index 05ed7c0..50de97f 100644 --- a/crates/clickhouse-cloud-api/tests/integration/mod.rs +++ b/crates/clickhouse-cloud-api/tests/integration/mod.rs @@ -1 +1,3 @@ +#![allow(dead_code)] + pub mod support; diff --git a/crates/clickhouse-cloud-api/tests/integration/support.rs b/crates/clickhouse-cloud-api/tests/integration/support.rs index f31d9e1..22df4b7 100644 --- a/crates/clickhouse-cloud-api/tests/integration/support.rs +++ b/crates/clickhouse-cloud-api/tests/integration/support.rs @@ -111,6 +111,35 @@ impl TestContext { format!("tag:run-id={}", self.run_id), ] } + + pub fn postgres_service_name(&self) -> String { + format!("clickhousectl-it-pg-{}", self.run_id) + } + + pub fn postgres_run_tags(&self) -> Vec { + vec![ + ResourceTagsV1 { + key: "managed-by".to_string(), + value: Some("clickhousectl-it".to_string()), + }, + ResourceTagsV1 { + key: "suite".to_string(), + value: Some("postgres-crud".to_string()), + }, + ResourceTagsV1 { + key: "run-id".to_string(), + value: Some(self.run_id.clone()), + }, + ] + } + + pub fn postgres_run_tag_filters(&self) -> Vec { + vec![ + "tag:managed-by=clickhousectl-it".to_string(), + "tag:suite=postgres-crud".to_string(), + format!("tag:run-id={}", self.run_id), + ] + } } pub fn 
create_client() -> TestResult { @@ -229,6 +258,7 @@ impl FailureRecorder { #[derive(Default)] pub struct CleanupRegistry { service_ids: Vec, + postgres_ids: Vec, } impl CleanupRegistry { @@ -241,6 +271,15 @@ impl CleanupRegistry { .retain(|registered| registered != service_id); } + pub fn register_postgres(&mut self, postgres_id: impl Into) { + self.postgres_ids.push(postgres_id.into()); + } + + pub fn unregister_postgres(&mut self, postgres_id: &str) { + self.postgres_ids + .retain(|registered| registered != postgres_id); + } + pub async fn cleanup(&mut self, client: &Client, org_id: &str, delete_timeout: Duration, poll_interval: Duration) -> Result<(), String> { let mut failures = Vec::new(); @@ -250,6 +289,12 @@ impl CleanupRegistry { } } + while let Some(postgres_id) = self.postgres_ids.pop() { + if let Err(error) = ensure_postgres_gone(client, org_id, &postgres_id, delete_timeout, poll_interval).await { + failures.push(format!("postgres {postgres_id}: {error}")); + } + } + if failures.is_empty() { Ok(()) } else { @@ -335,6 +380,51 @@ async fn ensure_service_gone( Ok(()) } +async fn ensure_postgres_gone( + client: &Client, + org_id: &str, + postgres_id: &str, + delete_timeout: Duration, + poll_interval: Duration, +) -> TestResult<()> { + eprintln!(" cleanup: ensuring postgres service is gone"); + + match client.postgres_service_get(org_id, postgres_id).await { + Ok(_) => {} + Err(clickhouse_cloud_api::Error::Api { status: 404, .. }) => return Ok(()), + Err(_) => {} + } + + match client.postgres_service_delete(org_id, postgres_id).await { + Ok(_) => {} + Err(clickhouse_cloud_api::Error::Api { status: 404, .. 
}) => return Ok(()), + Err(e) => return Err(e.into()), + } + + poll_until("postgres deletion", delete_timeout, poll_interval, || { + let client = client.clone(); + let org_id = org_id.to_string(); + let postgres_id = postgres_id.to_string(); + async move { + match client.postgres_service_get(&org_id, &postgres_id).await { + Ok(_) => Ok(None), + Err(clickhouse_cloud_api::Error::Api { status: 404, .. }) => Ok(Some(())), + Err(e) => { + let message = e.to_string(); + if message.contains("404") || message.contains("not found") { + Ok(Some(())) + } else { + Err(e.into()) + } + } + } + } + }) + .await?; + + Ok(()) +} + // ── Polling ────────────────────────────────────────────────────────── pub async fn poll_until( diff --git a/crates/clickhouse-cloud-api/tests/integration_postgres_test.rs b/crates/clickhouse-cloud-api/tests/integration_postgres_test.rs new file mode 100644 index 0000000..102516b --- /dev/null +++ b/crates/clickhouse-cloud-api/tests/integration_postgres_test.rs @@ -0,0 +1,447 @@ +mod integration; + +use clickhouse_cloud_api::models::*; +use integration::support::*; + +#[tokio::test] +#[ignore = "requires live ClickHouse Cloud credentials and provisions real resources"] +async fn cloud_postgres_crud_lifecycle() -> TestResult<()> { + let ctx = TestContext::from_env()?; + let client = create_client()?; + let mut cleanup = CleanupRegistry::default(); + + let test_result = async { + log_run_header("cloud_postgres_crud_lifecycle", &ctx); + let mut failures = FailureRecorder::default(); + let size = PgSize::R8gd_medium; + let storage_gb: i64 = 59; + + // ── Preflight ─────────────────────────────────────────────── + + log_phase("Preflight"); + let list_before = failures + .run( + &ctx, + StepKind::Blocking, + "check for leftover tagged postgres services", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let filters = ctx.postgres_run_tag_filters(); + async move { + let resp = client.postgres_service_get_list(&org_id).await?; + let 
services = resp + .result + .ok_or("postgres list returned no result")?; + let leftover: Vec<_> = services + .into_iter() + .filter(|s| filters_match_tags(&filters, &s.tags)) + .collect(); + Ok(leftover) + } + }, + ) + .await? + .expect("blocking steps always return a value"); + assert!( + list_before.is_empty(), + "found an existing tagged postgres service for this run id before create" + ); + + // ── Provision ─────────────────────────────────────────────── + + log_phase("Provision Postgres Service"); + + let create_body = PostgresServicePostRequest { + name: ctx.postgres_service_name(), + provider: PgProvider::Unknown(ctx.provider.clone()), + region: ctx.region.clone(), + size: size.clone(), + storage_size: storage_gb, + tags: Some(ctx.postgres_run_tags()), + ..Default::default() + }; + + let created = failures + .run(&ctx, StepKind::Blocking, "create postgres service", || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let body = create_body.clone(); + async move { + let resp = client.postgres_service_create(&org_id, &body).await?; + resp.result + .ok_or_else(|| "postgres create returned no result".into()) + } + }) + .await? 
+ .expect("blocking steps always return a value"); + + let postgres_id = created.id.to_string(); + eprintln!("postgres_id: "); + cleanup.register_postgres(postgres_id.clone()); + + let ready = failures + .run( + &ctx, + StepKind::Blocking, + "wait for postgres service running", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + poll_until( + "postgres running state", + ctx.steady_state_timeout, + ctx.poll_interval, + || { + let client = client.clone(); + let org_id = org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + let resp = client + .postgres_service_get(&org_id, &postgres_id) + .await?; + let svc = resp + .result + .ok_or("postgres get returned no result")?; + if svc.state.to_string() == "running" { + Ok(Some(svc)) + } else { + Ok(None) + } + } + }, + ) + .await + } + }, + ) + .await? + .expect("blocking steps always return a value"); + + assert_eq!(ready.name, ctx.postgres_service_name()); + assert_eq!(ready.size.to_string(), size.to_string()); + assert_eq!(ready.storage_size, storage_gb); + assert_eq!(ready.region, ctx.region); + assert_eq!(ready.provider.to_string(), ctx.provider); + assert!( + !ready.hostname.is_empty(), + "running postgres service returned empty hostname" + ); + assert!( + !ready.connection_string.is_empty(), + "running postgres service returned empty connection string" + ); + + let listed = failures + .run( + &ctx, + StepKind::Blocking, + "verify postgres service is discoverable in list", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + async move { + let resp = client.postgres_service_get_list(&org_id).await?; + resp.result + .ok_or_else(|| "postgres list returned no result".into()) + } + }, + ) + .await? 
+ .expect("blocking steps always return a value"); + assert!( + listed.iter().any(|s| s.id.to_string() == postgres_id), + "created postgres service was not visible in list" + ); + + // ── Certificates ──────────────────────────────────────────── + + log_phase("Certificates"); + failures + .run( + &ctx, + StepKind::NonBlocking, + "fetch postgres CA certificates", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + let pem = client + .postgres_service_certs_get(&org_id, &postgres_id) + .await?; + if !pem.contains("BEGIN CERTIFICATE") { + return Err(format!( + "cert response did not look like a PEM bundle: {} bytes", + pem.len() + ) + .into()); + } + Ok(()) + } + }, + ) + .await?; + + // ── Runtime Config ────────────────────────────────────────── + // + // PATCH is intentionally not exercised end-to-end here: the generated + // PgConfig struct has non-Option `serde_json::Value` fields that + // serialize as `null`, which the live API rejects with + // `Validation failed for following fields: pg_config.*`. Once the + // OpenAPI spec marks these fields as optional (or the generator + // emits Option) we can extend this phase to round-trip a + // change to max_connections and verify via GET. + + log_phase("Runtime Config"); + failures + .run( + &ctx, + StepKind::NonBlocking, + "get postgres runtime config", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + let resp = client + .postgres_instance_config_get(&org_id, &postgres_id) + .await?; + if resp.result.is_none() { + return Err("postgres config get returned no result".into()); + } + Ok(()) + } + }, + ) + .await?; + + // ── Patch (tags) ──────────────────────────────────────────── + // + // We exercise PATCH by updating `tags` rather than `name`. 
The beta + // Postgres PATCH endpoint rejects `name` values that the CREATE and + // Service PATCH endpoints accept (e.g. hyphens, plain alphanumerics) + // with "request body property can't be validated: name" — likely a + // server-side validation bug. Switch this phase back to name once + // the endpoint exits beta and accepts the same grammar as CREATE. + + log_phase("Patch (tags)"); + let mut new_tags = ctx.postgres_run_tags(); + new_tags.push(ResourceTagsV1 { + key: "phase".to_string(), + value: Some("patched".to_string()), + }); + failures + .run(&ctx, StepKind::Blocking, "patch postgres tags", || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + let tags = new_tags.clone(); + async move { + let body = PostgresServicePatchRequest { + tags: Some(tags), + ..Default::default() + }; + client + .postgres_service_patch(&org_id, &postgres_id, &body) + .await?; + Ok(()) + } + }) + .await?; + + failures + .run( + &ctx, + StepKind::Blocking, + "verify tag patch visible in get", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + let resp = client + .postgres_service_get(&org_id, &postgres_id) + .await?; + let svc = resp.result.ok_or("postgres get returned no result")?; + let has_phase_tag = svc.tags.iter().any(|t| { + t.key == "phase" && t.value.as_deref() == Some("patched") + }); + if !has_phase_tag { + return Err("patched `phase=patched` tag not present on service after PATCH".into()); + } + Ok(()) + } + }, + ) + .await?; + + // ── Password ──────────────────────────────────────────────── + + // Per OpenAPI spec, PostgresServicePasswordResource.password is only + // populated when the request omits `password` (server-generated path). 
+ // Because `PostgresServiceSetPassword.password` is a required String + // in the generated model, we exercise the user-supplied path here and + // treat a successful 200 as the pass condition — the response will + // correctly contain an empty/absent password in that case. + log_phase("Password"); + failures + .run( + &ctx, + StepKind::NonBlocking, + "reset postgres superuser password", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + let new_password = format!("ItPw-{}-Xx!9", ctx.run_id); + async move { + let body = PostgresServiceSetPassword { + password: new_password, + }; + client + .postgres_service_set_password(&org_id, &postgres_id, &body) + .await?; + Ok(()) + } + }, + ) + .await?; + + // ── Restart ───────────────────────────────────────────────── + + log_phase("Restart"); + failures + .run(&ctx, StepKind::Blocking, "restart postgres service", || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + let timeout = ctx.create_timeout; + let interval = ctx.poll_interval; + async move { + client + .postgres_service_patch_state( + &org_id, + &postgres_id, + &PostgresServiceSetState { + command: PostgresServiceSetStateCommand::Restart, + }, + ) + .await?; + poll_until("postgres running after restart", timeout, interval, || { + let client = client.clone(); + let org_id = org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + let resp = client + .postgres_service_get(&org_id, &postgres_id) + .await?; + let svc = resp + .result + .ok_or("postgres get returned no result")?; + if svc.state.to_string() == "running" { + Ok(Some(())) + } else { + Ok(None) + } + } + }) + .await?; + Ok(()) + } + }) + .await?; + + // ── Delete ────────────────────────────────────────────────── + + log_phase("Delete"); + failures + .run(&ctx, StepKind::Blocking, "delete postgres service", || { + let client = client.clone(); + let org_id = 
ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + client.postgres_service_delete(&org_id, &postgres_id).await?; + Ok(()) + } + }) + .await?; + + failures + .run( + &ctx, + StepKind::Blocking, + "confirm postgres service is gone after delete", + || { + let client = client.clone(); + let org_id = ctx.org_id.clone(); + let postgres_id = postgres_id.clone(); + let timeout = ctx.delete_timeout; + let interval = ctx.poll_interval; + async move { + poll_until("postgres deletion", timeout, interval, || { + let client = client.clone(); + let org_id = org_id.clone(); + let postgres_id = postgres_id.clone(); + async move { + match client + .postgres_service_get(&org_id, &postgres_id) + .await + { + Ok(_) => Ok(None), + Err(clickhouse_cloud_api::Error::Api { + status: 404, .. + }) => Ok(Some(())), + Err(e) => { + let message = e.to_string(); + if message.contains("404") + || message.contains("not found") + { + Ok(Some(())) + } else { + Err(e.into()) + } + } + } + } + }) + .await?; + Ok(()) + } + }, + ) + .await?; + cleanup.unregister_postgres(&postgres_id); + + failures.finish() + } + .await; + + let cleanup_result = cleanup + .cleanup(&client, &ctx.org_id, ctx.delete_timeout, ctx.poll_interval) + .await; + + match (test_result, cleanup_result) { + (Ok(()), Ok(())) => Ok(()), + (Err(error), Ok(())) => Err(error), + (Ok(()), Err(cleanup_error)) => Err(cleanup_error.into()), + (Err(error), Err(cleanup_error)) => { + Err(format!("{error}\ncleanup failed:\n{cleanup_error}").into()) + } + } +} + +fn filters_match_tags(filters: &[String], tags: &[ResourceTagsV1]) -> bool { + filters.iter().all(|filter| { + let Some(expr) = filter.strip_prefix("tag:") else { + return true; + }; + let Some((key, value)) = expr.split_once('=') else { + return tags.iter().any(|t| t.key == expr); + }; + tags.iter() + .any(|t| t.key == key && t.value.as_deref() == Some(value)) + }) +} From 8ead203d43b060f5552de2c47a755fc04733c86d Mon Sep 17 00:00:00 2001 From: sdairs Date: Sun, 
19 Apr 2026 15:27:50 +0100 Subject: [PATCH 04/12] Switch tabled tables to markdown style Tables now render using only ASCII `|` and `-` instead of the non-standard rounded box-drawing characters, so output is readable in minimal terminals and log aggregators and pasteable into issues/PRs. Closes #126 --- crates/clickhousectl/src/cloud/commands.rs | 16 ++++++++-------- crates/clickhousectl/src/cloud/postgres.rs | 2 +- crates/clickhousectl/src/local/output.rs | 8 ++++---- crates/clickhousectl/src/main.rs | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/clickhousectl/src/cloud/commands.rs b/crates/clickhousectl/src/cloud/commands.rs index 72ceefa..e263dc7 100644 --- a/crates/clickhousectl/src/cloud/commands.rs +++ b/crates/clickhousectl/src/cloud/commands.rs @@ -428,7 +428,7 @@ pub async fn org_list(client: &CloudClient, json: bool) -> Result<(), Box")?; @@ -283,7 +283,7 @@ impl fmt::Display for ServerListOutput { project: e.project.clone().unwrap_or_default(), }) .collect(); - let table = Table::new(rows).with(Style::rounded()).to_string(); + let table = Table::new(rows).with(Style::markdown()).to_string(); writeln!(f, "{table}")?; } else { let rows: Vec = self @@ -302,7 +302,7 @@ impl fmt::Display for ServerListOutput { tcp_port: e.tcp_port.map(|p| p.to_string()).unwrap_or_default(), }) .collect(); - let table = Table::new(rows).with(Style::rounded()).to_string(); + let table = Table::new(rows).with(Style::markdown()).to_string(); writeln!(f, "{table}")?; } diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs index 14df9c8..dd7decb 100644 --- a/crates/clickhousectl/src/main.rs +++ b/crates/clickhousectl/src/main.rs @@ -291,7 +291,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> { if args.json { println!("{}", serde_json::to_string_pretty(&rows)?); } else { - println!("{}", Table::new(rows).with(Style::rounded())); + println!("{}", Table::new(rows).with(Style::markdown())); } Ok(()) } From 
788721bae6fd7385f29adf818511dbf2ef6f34f1 Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 17:56:25 +0100 Subject: [PATCH 05/12] Tag clickhouse.com requests with detected AI agent Closes #133. Uses the is-ai-agent crate to detect when the CLI is invoked under a known agent (Claude Code, Cursor, Gemini CLI, Codex, Goose, Devin, etc.) and appends an agent= query param to outbound requests to ClickHouse-owned hosts (builds.clickhouse.com, packages.clickhouse.com, api.clickhouse.cloud). GitHub and other third-party hosts are not annotated. The cloud library gains a generic Client::with_extra_query_params builder so the CLI can attach the tag to every request. --- CLAUDE.md | 1 + Cargo.lock | 9 +- README.md | 4 + crates/clickhouse-cloud-api/src/client.rs | 29 +++- .../clickhouse-cloud-api/tests/client_test.rs | 65 +++++++++ crates/clickhousectl/Cargo.toml | 1 + crates/clickhousectl/src/agent_signal.rs | 124 ++++++++++++++++++ crates/clickhousectl/src/cloud/client.rs | 25 ++-- crates/clickhousectl/src/main.rs | 1 + .../src/version_manager/download.rs | 3 +- .../clickhousectl/src/version_manager/list.rs | 5 +- .../src/version_manager/resolve.rs | 10 +- 12 files changed, 261 insertions(+), 16 deletions(-) create mode 100644 crates/clickhousectl/src/agent_signal.rs diff --git a/CLAUDE.md b/CLAUDE.md index e5bc5f8..7f8850f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -122,6 +122,7 @@ cargo add -p clickhouse-cloud-api url - `src/paths.rs` handles `~/.clickhouse/` paths (global install dir); `src/init.rs` handles `.clickhouse/` paths (project-local data dir) - `local client` uses `exec()` (process replacement), so code after `cmd.exec()` only runs on failure - Error types use `thiserror` in `src/error.rs`; cloud module has its own error type wrapped as `Error::Cloud(String)` +- AI agent attribution lives in `src/agent_signal.rs`. 
When the CLI runs under a detected agent, an `agent=` query param is appended to requests targeting ClickHouse-owned hosts only (helper: `add_agent_query_for(builder, url)`). The cloud library accepts default query params via `Client::with_extra_query_params`, populated by `tag_with_agent` in `cloud/client.rs`. Do not extend tagging to GitHub or other third-party hosts. - Version resolution (`version_manager/resolve.rs`) handles specs like `stable`, `lts`, `25.12`, or exact `25.12.5.44` — all resolve to an exact version + channel via GitHub API - Releases are triggered by pushing a version tag (`v0.1.3`), which runs the GitHub Actions workflow diff --git a/Cargo.lock b/Cargo.lock index b0cd0aa..487ce09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -266,6 +266,7 @@ dependencies = [ "flate2", "futures-util", "indicatif", + "is-ai-agent", "libc", "open", "reqwest", @@ -937,6 +938,12 @@ dependencies = [ "serde", ] +[[package]] +name = "is-ai-agent" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4046d7cc8977d298ddefa0d108d21a4242d4ea81d522545ae1f01dc679b5c182" + [[package]] name = "is-docker" version = "0.2.0" @@ -1872,7 +1879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.2", "once_cell", "rustix", "windows-sys 0.61.2", diff --git a/README.md b/README.md index 4ebfc35..792980a 100644 --- a/README.md +++ b/README.md @@ -579,6 +579,10 @@ clickhousectl update --check The CLI also checks for updates in the background (at most once per 24 hours) and displays a notice when a newer version is available. +## AI agent attribution + +When `clickhousectl` is invoked from inside a known AI coding agent (e.g. 
Claude Code, Cursor, Gemini CLI, Codex, Goose), it tags outbound requests to ClickHouse-owned hosts (`builds.clickhouse.com`, `packages.clickhouse.com`, `api.clickhouse.cloud`) with an `agent=` query parameter. Detection is via the [`is-ai-agent`](https://crates.io/crates/is-ai-agent) crate, which reads the standard `AGENT` env var and tool-specific signals. No tag is added for human-driven invocations, and third-party download mirrors (GitHub Releases) are never annotated. + ## Cloud integration testing Cloud API integration is tested against a real ClickHouse Cloud workspace via the library crate. All changes to cloud commands must pass CI testing before merge. Tests are in [`crates/clickhouse-cloud-api/tests/integration_test.rs`](crates/clickhouse-cloud-api/tests/integration_test.rs). diff --git a/crates/clickhouse-cloud-api/src/client.rs b/crates/clickhouse-cloud-api/src/client.rs index ac75a6c..5480216 100644 --- a/crates/clickhouse-cloud-api/src/client.rs +++ b/crates/clickhouse-cloud-api/src/client.rs @@ -20,6 +20,7 @@ pub struct Client { http: reqwest::Client, base_url: String, auth: Auth, + extra_query_params: Vec<(String, String)>, } impl Client { @@ -41,6 +42,7 @@ impl Client { key_id: key_id.into(), key_secret: key_secret.into(), }, + extra_query_params: Vec::new(), } } @@ -55,6 +57,7 @@ impl Client { auth: Auth::Bearer { token: token.into(), }, + extra_query_params: Vec::new(), } } @@ -75,6 +78,7 @@ impl Client { key_id: key_id.into(), key_secret: key_secret.into(), }, + extra_query_params: Vec::new(), } } @@ -93,9 +97,26 @@ impl Client { auth: Auth::Bearer { token: token.into(), }, + extra_query_params: Vec::new(), } } + /// Attach extra query parameters that should be appended to every request + /// this client makes. Useful for callers that want to surface a CLI- or + /// runtime-level signal (e.g. an `agent` tag) to the API for analytics. + /// + /// Multiple calls accumulate; existing params are preserved. 
+ pub fn with_extra_query_params<I, K, V>(mut self, params: I) -> Self
+ where
+ I: IntoIterator<Item = (K, V)>,
+ K: Into<String>,
+ V: Into<String>,
+ {
+ self.extra_query_params
+ .extend(params.into_iter().map(|(k, v)| (k.into(), v.into())));
+ self
+ }
+
 /// Replace the Bearer token without rebuilding the client.
 ///
 /// Useful for refreshing an expired OAuth token.
@@ -113,13 +134,17 @@ impl Client {
 }
 fn request(&self, method: reqwest::Method, path: &str) -> reqwest::RequestBuilder {
- let builder = self
+ let mut builder = self
 .http
 .request(method, format!("{}{}", self.base_url, path));
- match &self.auth {
+ builder = match &self.auth {
 Auth::Basic { key_id, key_secret } => builder.basic_auth(key_id, Some(key_secret)),
 Auth::Bearer { token } => builder.bearer_auth(token),
+ };
+ if !self.extra_query_params.is_empty() {
+ builder = builder.query(&self.extra_query_params);
 }
+ builder
 }
 /// Get list of available organizations
diff --git a/crates/clickhouse-cloud-api/tests/client_test.rs b/crates/clickhouse-cloud-api/tests/client_test.rs
index 0b24396..5f0f675 100644
--- a/crates/clickhouse-cloud-api/tests/client_test.rs
+++ b/crates/clickhouse-cloud-api/tests/client_test.rs
@@ -2786,3 +2786,68 @@ async fn default_base_url_is_production() {
 // but we can verify the client is constructable without panicking.
 let _client = Client::new("key", "secret");
 }
+
+// ===========================================================================
+// Extra query params (e.g.
agent attribution tag) +// =========================================================================== + +#[tokio::test] +async fn extra_query_params_are_applied_to_every_request() { + let mock_server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/v1/organizations")) + .and(query_param("agent", "claude-code")) + .respond_with(ok_json(serde_json::json!([]))) + .mount(&mock_server) + .await; + + let client = Client::with_base_url(mock_server.uri(), "key", "secret") + .with_extra_query_params([("agent", "claude-code")]); + let resp = client.organization_get_list().await.unwrap(); + assert_eq!(resp.result.unwrap().len(), 0); +} + +#[tokio::test] +async fn extra_query_params_coexist_with_endpoint_query_params() { + // The library already attaches `from_date`/`to_date` to usageCost — make + // sure our default params don't clobber them. + let mock_server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/v1/organizations/org-1/usageCost")) + .and(query_param("from_date", "2024-01-01")) + .and(query_param("to_date", "2024-01-31")) + .and(query_param("agent", "claude-code")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "status": 200, + "result": { + "grandTotalCHC": 0.0, + "costsByDate": [] + } + }))) + .mount(&mock_server) + .await; + + let client = Client::with_base_url(mock_server.uri(), "key", "secret") + .with_extra_query_params([("agent", "claude-code")]); + client + .usage_cost_get("org-1", "2024-01-01", "2024-01-31", &[]) + .await + .unwrap(); +} + +#[tokio::test] +async fn empty_extra_query_params_keep_url_clean() { + // Client built with no extras should not append a stray empty `?`. 
+ let mock_server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/v1/organizations")) + .respond_with(ok_json(serde_json::json!([]))) + .mount(&mock_server) + .await; + + let client = Client::with_base_url(mock_server.uri(), "key", "secret"); + client.organization_get_list().await.unwrap(); +} diff --git a/crates/clickhousectl/Cargo.toml b/crates/clickhousectl/Cargo.toml index e138cb7..14c73c9 100644 --- a/crates/clickhousectl/Cargo.toml +++ b/crates/clickhousectl/Cargo.toml @@ -24,6 +24,7 @@ url = "2.5.8" tabled = "0.20.0" clickhouse-cloud-api = { version = "0.1.0", path = "../clickhouse-cloud-api" } uuid = { version = "1.23.0", features = ["v4"] } +is-ai-agent = "0.1.0" [dev-dependencies] tempfile = "3.27.0" diff --git a/crates/clickhousectl/src/agent_signal.rs b/crates/clickhousectl/src/agent_signal.rs new file mode 100644 index 0000000..7963ab2 --- /dev/null +++ b/crates/clickhousectl/src/agent_signal.rs @@ -0,0 +1,124 @@ +//! Detect when the CLI is being driven by an AI coding agent and surface that +//! signal as an `agent` query parameter on outbound clickhouse.com / +//! clickhouse.cloud requests, so server-side analytics can attribute usage. +//! +//! Detection is delegated to the `is_ai_agent` crate, which inspects standard +//! and tool-specific environment variables (e.g. `AGENT`, `CLAUDECODE`). + +use is_ai_agent::AgentId; +use reqwest::RequestBuilder; + +/// Canonical kebab-case identifier for the detected agent, suitable for use +/// as a URL query value. Returns `None` when no agent signal is present. 
+pub fn detected_agent_id() -> Option<&'static str> { + is_ai_agent::detect().map(|a| agent_id_str(a.id)) +} + +fn agent_id_str(id: AgentId) -> &'static str { + match id { + AgentId::ClaudeCode => "claude-code", + AgentId::Cursor => "cursor", + AgentId::GeminiCli => "gemini-cli", + AgentId::Codex => "codex", + AgentId::Augment => "augment", + AgentId::Cline => "cline", + AgentId::OpenCode => "opencode", + AgentId::Trae => "trae", + AgentId::Goose => "goose", + AgentId::Amp => "amp", + AgentId::Devin => "devin", + AgentId::Unknown => "unknown", + } +} + +/// Append the `agent=` query parameter to a request when an AI coding +/// agent is detected. Pass-through when no agent is present. +pub fn add_agent_query(builder: RequestBuilder) -> RequestBuilder { + match detected_agent_id() { + Some(id) => builder.query(&[("agent", id)]), + None => builder, + } +} + +/// Like `add_agent_query`, but only annotates requests targeting +/// ClickHouse-owned hosts (so we don't leak the signal to GitHub or other +/// third-party download mirrors). +pub fn add_agent_query_for(builder: RequestBuilder, url: &str) -> RequestBuilder { + if is_clickhouse_url(url) { + add_agent_query(builder) + } else { + builder + } +} + +fn is_clickhouse_url(url: &str) -> bool { + let host = match url.split_once("://") { + Some((_, rest)) => rest.split('/').next().unwrap_or(""), + None => url, + }; + let host = host.split(':').next().unwrap_or(host); + host == "clickhouse.com" + || host == "clickhouse.cloud" + || host.ends_with(".clickhouse.com") + || host.ends_with(".clickhouse.cloud") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn maps_every_known_agent_id() { + // Exhaustive smoke check that no AgentId variant gets the empty string. 
+ for id in [ + AgentId::ClaudeCode, + AgentId::Cursor, + AgentId::GeminiCli, + AgentId::Codex, + AgentId::Augment, + AgentId::Cline, + AgentId::OpenCode, + AgentId::Trae, + AgentId::Goose, + AgentId::Amp, + AgentId::Devin, + AgentId::Unknown, + ] { + let s = agent_id_str(id); + assert!(!s.is_empty()); + // Identifier must be URL-safe — no spaces or uppercase. + assert!(s.chars().all(|c| c.is_ascii_lowercase() || c == '-')); + } + } + + #[test] + fn claude_code_id_is_kebab_case() { + assert_eq!(agent_id_str(AgentId::ClaudeCode), "claude-code"); + assert_eq!(agent_id_str(AgentId::GeminiCli), "gemini-cli"); + } + + #[test] + fn detects_clickhouse_owned_hosts() { + assert!(is_clickhouse_url("https://builds.clickhouse.com/master/amd64/clickhouse")); + assert!(is_clickhouse_url( + "https://packages.clickhouse.com/tgz/stable/clickhouse-common-static-25.12.9.61-amd64.tgz" + )); + assert!(is_clickhouse_url("https://api.clickhouse.cloud/v1/organizations")); + assert!(is_clickhouse_url("https://clickhouse.com/docs/cloud/")); + } + + #[test] + fn rejects_non_clickhouse_hosts() { + assert!(!is_clickhouse_url( + "https://github.com/ClickHouse/ClickHouse/releases/download/v25.12.5.44-stable/clickhouse-macos-aarch64" + )); + assert!(!is_clickhouse_url("https://api.github.com/repos/ClickHouse/ClickHouse/releases")); + } + + #[test] + fn rejects_lookalike_hosts() { + // Suffix match must be a true subdomain, not a substring of an attacker-controlled host. 
+ assert!(!is_clickhouse_url("https://evil-clickhouse.com/path")); + assert!(!is_clickhouse_url("https://clickhouse.com.attacker.com/x")); + } +} diff --git a/crates/clickhousectl/src/cloud/client.rs b/crates/clickhousectl/src/cloud/client.rs index fe4466d..639b18e 100644 --- a/crates/clickhousectl/src/cloud/client.rs +++ b/crates/clickhousectl/src/cloud/client.rs @@ -116,6 +116,15 @@ fn lib_base_url(cli_base_url: &str) -> String { .to_string() } +/// Tag every request with `agent=` when an AI coding agent is driving the +/// CLI, so server-side analytics can attribute usage. No-op for human users. +fn tag_with_agent(client: clickhouse_cloud_api::Client) -> clickhouse_cloud_api::Client { + match crate::agent_signal::detected_agent_id() { + Some(id) => client.with_extra_query_params([("agent", id)]), + None => client, + } +} + impl CloudClient { pub fn new( api_key: Option<&str>, @@ -141,12 +150,12 @@ impl CloudClient { let base_url = url_override .map(crate::cloud::auth::normalize_api_url) .unwrap_or_else(|| DEFAULT_BASE_URL.to_string()); - let lib_client = clickhouse_cloud_api::Client::with_http_client( + let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( http, lib_base_url(&base_url), &key, &secret, - ); + )); return Ok(Self { lib_client, auth_mode: AuthMode::Basic, @@ -161,12 +170,12 @@ impl CloudClient { // Try file credentials if let Some(creds) = crate::cloud::credentials::load_credentials() { - let lib_client = clickhouse_cloud_api::Client::with_http_client( + let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( http, lib_base_url(&base_url), &creds.api_key, &creds.api_secret, - ); + )); return Ok(Self { lib_client, auth_mode: AuthMode::Basic, @@ -179,12 +188,12 @@ impl CloudClient { let env_key = env::var("CLICKHOUSE_CLOUD_API_KEY").ok(); let env_secret = env::var("CLICKHOUSE_CLOUD_API_SECRET").ok(); if let (Some(key), Some(secret)) = (env_key, env_secret) { - let lib_client = 
clickhouse_cloud_api::Client::with_http_client( + let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( http, lib_base_url(&base_url), &key, &secret, - ); + )); return Ok(Self { lib_client, auth_mode: AuthMode::Basic, @@ -200,11 +209,11 @@ impl CloudClient { let base_url = url_override .map(crate::cloud::auth::normalize_api_url) .unwrap_or(tokens.api_url.clone()); - let lib_client = clickhouse_cloud_api::Client::with_http_client_bearer( + let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client_bearer( http, lib_base_url(&base_url), &tokens.access_token, - ); + )); return Ok(Self { lib_client, auth_mode: AuthMode::Bearer, diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs index dd7decb..1310676 100644 --- a/crates/clickhousectl/src/main.rs +++ b/crates/clickhousectl/src/main.rs @@ -1,3 +1,4 @@ +mod agent_signal; mod cli; mod cloud; mod error; diff --git a/crates/clickhousectl/src/version_manager/download.rs b/crates/clickhousectl/src/version_manager/download.rs index 5e2f604..d8d2da1 100644 --- a/crates/clickhousectl/src/version_manager/download.rs +++ b/crates/clickhousectl/src/version_manager/download.rs @@ -21,8 +21,7 @@ pub async fn download_url(url: &str, dest_path: &Path) -> Result<()> { .user_agent(crate::user_agent::user_agent()) .build()?; - let response = client - .get(url) + let response = crate::agent_signal::add_agent_query_for(client.get(url), url) .send() .await? 
.error_for_status()
diff --git a/crates/clickhousectl/src/version_manager/list.rs b/crates/clickhousectl/src/version_manager/list.rs
index 46e1328..84574b9 100644
--- a/crates/clickhousectl/src/version_manager/list.rs
+++ b/crates/clickhousectl/src/version_manager/list.rs
@@ -130,7 +130,10 @@ pub async fn list_available_versions_from_builds() -> Result<Vec<String>> {
 for mm in (1..=12).rev() {
 let version_path = format!("{}.{}", yy, mm);
 let url = builds_probe_url(&version_path, &platform);
- match client.head(&url).send().await {
+ match crate::agent_signal::add_agent_query_for(client.head(&url), &url)
+ .send()
+ .await
+ {
 Ok(resp) if resp.status().is_success() => {
 available.push(version_path);
 }
diff --git a/crates/clickhousectl/src/version_manager/resolve.rs b/crates/clickhousectl/src/version_manager/resolve.rs
index 4c334d7..d7af9c5 100644
--- a/crates/clickhousectl/src/version_manager/resolve.rs
+++ b/crates/clickhousectl/src/version_manager/resolve.rs
@@ -83,7 +83,10 @@ async fn resolve_major(major: u32, platform: &Platform) -> Result {
 highest_available = Some(minor);
 }
@@ -223,7 +226,10 @@ async fn probe_builds(version_path: &str, platform: &Platform) -> bool {
 Err(_) => return false,
 };
- match client.head(&url).send().await {
+ match crate::agent_signal::add_agent_query_for(client.head(&url), &url)
+ .send()
+ .await
+ {
 Ok(resp) => resp.status().is_success(),
 Err(_) => false,
 }
From 1f0156a98c97d5206a1f8c2a0e9c8d5d38e6dfdb Mon Sep 17 00:00:00 2001
From: sdairs
Date: Mon, 4 May 2026 19:19:56 +0100
Subject: [PATCH 06/12] Collapse CloudClient::new credential ladder into resolve_auth

Three of the four credential branches in CloudClient::new differed only in which (key, secret) pair they pulled and which AuthSource label to attach. Introduce ResolvedAuth + resolve_auth() to walk the precedence ladder once, then build the lib client and tag it with the agent param at a single site.
resolve_active_auth_source becomes a thin wrapper that preserves its lenient half-set CLI-flag behavior for cloud auth status. --- crates/clickhousectl/src/cloud/client.rs | 204 ++++++++++++----------- 1 file changed, 105 insertions(+), 99 deletions(-) diff --git a/crates/clickhousectl/src/cloud/client.rs b/crates/clickhousectl/src/cloud/client.rs index 639b18e..646df82 100644 --- a/crates/clickhousectl/src/cloud/client.rs +++ b/crates/clickhousectl/src/cloud/client.rs @@ -39,6 +39,90 @@ pub enum AuthSource { OAuthTokens, } +/// Credentials picked by the precedence ladder, paired with the auth scheme +/// the lib client should be built with. +enum ResolvedCreds { + Basic { key: String, secret: String }, + Bearer { token: String }, +} + +/// One winning credential set: the keys/token, the source label, and the +/// API base URL the caller should talk to. +struct ResolvedAuth { + creds: ResolvedCreds, + source: AuthSource, + base_url: String, +} + +/// Walk the precedence ladder once. Order: CLI flags, credentials file, env +/// vars, OAuth tokens. Errors only when CLI flags are half-set (key without +/// secret or vice versa) or when nothing usable is configured. 
+fn resolve_auth(
+ api_key: Option<&str>,
+ api_secret: Option<&str>,
+ url_override: Option<&str>,
+) -> Result<ResolvedAuth> {
+ let normalized_default = || {
+ url_override
+ .map(crate::cloud::auth::normalize_api_url)
+ .unwrap_or_else(|| DEFAULT_BASE_URL.to_string())
+ };
+
+ if api_key.is_some() || api_secret.is_some() {
+ let key = api_key.map(String::from).ok_or_else(|| CloudError {
+ message: "API key required when --api-key or --api-secret is set".into(),
+ })?;
+ let secret = api_secret.map(String::from).ok_or_else(|| CloudError {
+ message: "API secret required when --api-key or --api-secret is set".into(),
+ })?;
+ return Ok(ResolvedAuth {
+ creds: ResolvedCreds::Basic { key, secret },
+ source: AuthSource::CliFlags,
+ base_url: normalized_default(),
+ });
+ }
+
+ if let Some(creds) = crate::cloud::credentials::load_credentials() {
+ return Ok(ResolvedAuth {
+ creds: ResolvedCreds::Basic {
+ key: creds.api_key,
+ secret: creds.api_secret,
+ },
+ source: AuthSource::CredentialsFile,
+ base_url: normalized_default(),
+ });
+ }
+
+ let env_key = env::var("CLICKHOUSE_CLOUD_API_KEY").ok();
+ let env_secret = env::var("CLICKHOUSE_CLOUD_API_SECRET").ok();
+ if let (Some(key), Some(secret)) = (env_key, env_secret) {
+ return Ok(ResolvedAuth {
+ creds: ResolvedCreds::Basic { key, secret },
+ source: AuthSource::EnvVars,
+ base_url: normalized_default(),
+ });
+ }
+
+ if let Some(tokens) = crate::cloud::auth::load_tokens()
+ && crate::cloud::auth::is_token_valid(&tokens)
+ {
+ let base_url = url_override
+ .map(crate::cloud::auth::normalize_api_url)
+ .unwrap_or(tokens.api_url);
+ return Ok(ResolvedAuth {
+ creds: ResolvedCreds::Bearer {
+ token: tokens.access_token,
+ },
+ source: AuthSource::OAuthTokens,
+ base_url,
+ });
+ }
+
+ Err(CloudError {
+ message: "No credentials found.
Run `clickhousectl cloud auth login` (OAuth, read-only), `clickhousectl cloud auth login --api-key KEY --api-secret SECRET` (read/write), set CLICKHOUSE_CLOUD_API_KEY + CLICKHOUSE_CLOUD_API_SECRET, or use --api-key/--api-secret.\n\nLearn how to create API keys: https://clickhouse.com/docs/cloud/manage/openapi?referrer=clickhousectl".into(),
+ })
+}
+
 /// Resolve the credential source that *would* win precedence if a `CloudClient`
 /// were constructed right now, without actually creating one.
 ///
@@ -49,23 +133,13 @@ pub fn resolve_active_auth_source(
 api_key: Option<&str>,
 api_secret: Option<&str>,
 ) -> Option<AuthSource> {
+ // Half-set CLI flags still indicate intent to use CLI flags, even though
+ // `resolve_auth` would reject them. This keeps `cloud auth status` honest
+ // about what the user *tried* to configure.
 if api_key.is_some() || api_secret.is_some() {
 return Some(AuthSource::CliFlags);
 }
- if crate::cloud::credentials::load_credentials().is_some() {
- return Some(AuthSource::CredentialsFile);
- }
- let env_key = env::var("CLICKHOUSE_CLOUD_API_KEY").ok();
- let env_secret = env::var("CLICKHOUSE_CLOUD_API_SECRET").ok();
- if env_key.is_some() && env_secret.is_some() {
- return Some(AuthSource::EnvVars);
- }
- if let Some(tokens) = crate::cloud::auth::load_tokens()
- && crate::cloud::auth::is_token_valid(&tokens)
- {
- return Some(AuthSource::OAuthTokens);
- }
- None
+ resolve_auth(None, None, None).ok().map(|r| r.source)
 }

 impl AuthSource {
@@ -138,92 +212,24 @@ impl CloudClient {
 message: format!("Failed to create HTTP client: {}", e),
 })?;

- // Priority: CLI flags > file credentials > env vars > OAuth tokens
- // API keys are project-scoped (read/write); OAuth is user-scoped (read-only).
- if api_key.is_some() || api_secret.is_some() { - let key = api_key.map(String::from).ok_or_else(|| CloudError { - message: "API key required when --api-key or --api-secret is set".into(), - })?; - let secret = api_secret.map(String::from).ok_or_else(|| CloudError { - message: "API secret required when --api-key or --api-secret is set".into(), - })?; - let base_url = url_override - .map(crate::cloud::auth::normalize_api_url) - .unwrap_or_else(|| DEFAULT_BASE_URL.to_string()); - let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( - http, - lib_base_url(&base_url), - &key, - &secret, - )); - return Ok(Self { - lib_client, - auth_mode: AuthMode::Basic, - auth_source: AuthSource::CliFlags, - base_url, - }); - } - - let base_url = url_override - .map(crate::cloud::auth::normalize_api_url) - .unwrap_or_else(|| DEFAULT_BASE_URL.to_string()); - - // Try file credentials - if let Some(creds) = crate::cloud::credentials::load_credentials() { - let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( - http, - lib_base_url(&base_url), - &creds.api_key, - &creds.api_secret, - )); - return Ok(Self { - lib_client, - auth_mode: AuthMode::Basic, - auth_source: AuthSource::CredentialsFile, - base_url, - }); - } - - // Try env vars - let env_key = env::var("CLICKHOUSE_CLOUD_API_KEY").ok(); - let env_secret = env::var("CLICKHOUSE_CLOUD_API_SECRET").ok(); - if let (Some(key), Some(secret)) = (env_key, env_secret) { - let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client( - http, - lib_base_url(&base_url), - &key, - &secret, - )); - return Ok(Self { - lib_client, - auth_mode: AuthMode::Basic, - auth_source: AuthSource::EnvVars, - base_url, - }); - } - - // Fall back to OAuth tokens (read-only) - if let Some(tokens) = crate::cloud::auth::load_tokens() - && crate::cloud::auth::is_token_valid(&tokens) - { - let base_url = url_override - .map(crate::cloud::auth::normalize_api_url) - 
.unwrap_or(tokens.api_url.clone()); - let lib_client = tag_with_agent(clickhouse_cloud_api::Client::with_http_client_bearer( - http, - lib_base_url(&base_url), - &tokens.access_token, - )); - return Ok(Self { - lib_client, - auth_mode: AuthMode::Bearer, - auth_source: AuthSource::OAuthTokens, - base_url, - }); - } + let resolved = resolve_auth(api_key, api_secret, url_override)?; + let lib_url = lib_base_url(&resolved.base_url); + let (lib_client, auth_mode) = match &resolved.creds { + ResolvedCreds::Basic { key, secret } => ( + clickhouse_cloud_api::Client::with_http_client(http, lib_url, key, secret), + AuthMode::Basic, + ), + ResolvedCreds::Bearer { token } => ( + clickhouse_cloud_api::Client::with_http_client_bearer(http, lib_url, token), + AuthMode::Bearer, + ), + }; - Err(CloudError { - message: "No credentials found. Run `clickhousectl cloud auth login` (OAuth, read-only), `clickhousectl cloud auth login --api-key KEY --api-secret SECRET` (read/write), set CLICKHOUSE_CLOUD_API_KEY + CLICKHOUSE_CLOUD_API_SECRET, or use --api-key/--api-secret.\n\nLearn how to create API keys: https://clickhouse.com/docs/cloud/manage/openapi?referrer=clickhousectl".into(), + Ok(Self { + lib_client: tag_with_agent(lib_client), + auth_mode, + auth_source: resolved.source, + base_url: resolved.base_url, }) } From 0353e2254ea1674493d744014db57a140e6e528e Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 19:30:18 +0100 Subject: [PATCH 07/12] Drop dead api_key/api_secret params from resolve_active_auth_source The helper is only called from cloud auth status, which never has --api-key/--api-secret to pass (the subcommand doesn't accept them). The half-set lenient branch and its test were protecting a contract no production caller exercises. 
Inlining the only sensible call removes the dead parameters and the dead branch, leaving a one-line wrapper that documents its actual purpose: peek at credential precedence without erroring on the empty case (which auth status needs to render no-creds-configured correctly).
---
 crates/clickhousectl/src/cloud/client.rs | 38 ++++--------------------
 crates/clickhousectl/src/main.rs | 2 +-
 2 files changed, 7 insertions(+), 33 deletions(-)

diff --git a/crates/clickhousectl/src/cloud/client.rs b/crates/clickhousectl/src/cloud/client.rs
index 646df82..7c4d52f 100644
--- a/crates/clickhousectl/src/cloud/client.rs
+++ b/crates/clickhousectl/src/cloud/client.rs
@@ -123,22 +123,13 @@ fn resolve_auth(
-/// Resolve the credential source that *would* win precedence if a `CloudClient`
-/// were constructed right now, without actually creating one.
+/// Peek which credential source would win precedence right now without
+/// actually building a `CloudClient`.
 ///
-/// Returns `None` if no usable credentials are configured. Mirrors the
-/// precedence used by `CloudClient::new`: CLI flags > credentials file
-/// > env vars > OAuth tokens.
-pub fn resolve_active_auth_source(
- api_key: Option<&str>,
- api_secret: Option<&str>,
-) -> Option<AuthSource> {
- // Half-set CLI flags still indicate intent to use CLI flags, even though
- // `resolve_auth` would reject them. This keeps `cloud auth status` honest
- // about what the user *tried* to configure.
- if api_key.is_some() || api_secret.is_some() {
- return Some(AuthSource::CliFlags);
- }
+/// Used by `cloud auth status`, which has to render correctly even when no
+/// credentials are configured (the case `CloudClient::new` errors out on).
+/// Returns `None` if nothing usable is configured.
+pub fn resolve_active_auth_source() -> Option<AuthSource> {
 resolve_auth(None, None, None).ok().map(|r| r.source)
 }

@@ -983,23 +974,6 @@ mod tests {
 assert_eq!(client.base_url(), DEFAULT_BASE_URL);
 }

- #[test]
- fn resolve_active_auth_source_cli_flags_take_precedence() {
- // CLI flags must always win, regardless of other configured sources.
- assert_eq!(
- resolve_active_auth_source(Some("k"), Some("s")),
- Some(AuthSource::CliFlags)
- );
- assert_eq!(
- resolve_active_auth_source(Some("k"), None),
- Some(AuthSource::CliFlags)
- );
- assert_eq!(
- resolve_active_auth_source(None, Some("s")),
- Some(AuthSource::CliFlags)
- );
- }
-
 #[test]
 fn convert_error_no_hint_for_403_basic() {
 let client = test_client();
diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs
index 1310676..b0e6f3e 100644
--- a/crates/clickhousectl/src/main.rs
+++ b/crates/clickhousectl/src/main.rs
@@ -193,7 +193,7 @@ async fn run_cloud(args: CloudArgs) -> Result<()> {

 // Determine which source would actually win precedence right now.
 // CLI --api-key/--api-secret aren't relevant to `auth status` itself.
- let active = cloud::resolve_active_auth_source(None, None);
+ let active = cloud::resolve_active_auth_source();
 let mark = |src: cloud::AuthSource| -> String {
 if active == Some(src) { "yes".into() } else { "-".into() }
 };
From a8214ffe5014ae28ce4baec01d8d90ff9f94cfaf Mon Sep 17 00:00:00 2001
From: sdairs
Date: Mon, 4 May 2026 19:45:10 +0100
Subject: [PATCH 08/12] Use is-ai-agent's canonical AgentId::as_str instead of a local mapping
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The local AgentId -> kebab-case match arm was duplicating a mapping the crate already maintains for its AGENT= env var parser. is-ai-agent 0.2.1 exposes AgentId::as_str returning the canonical kebab-case id (claude-code, gemini-cli, etc.) — the inverse of the parser. Switch to it.
Replaces our 12-arm match plus exhaustive variant test with a single delegation and a contract test against the upstream lookup. New agents added upstream automatically flow through without code changes here. --- Cargo.lock | 4 +- crates/clickhousectl/Cargo.toml | 2 +- crates/clickhousectl/src/agent_signal.rs | 62 ++++++------------------ 3 files changed, 17 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 487ce09..66b22b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -940,9 +940,9 @@ dependencies = [ [[package]] name = "is-ai-agent" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4046d7cc8977d298ddefa0d108d21a4242d4ea81d522545ae1f01dc679b5c182" +checksum = "a8745cc12e6796e1b20733dcc27e6062d51a22083eb00eaf0ed0a0880a60d302" [[package]] name = "is-docker" diff --git a/crates/clickhousectl/Cargo.toml b/crates/clickhousectl/Cargo.toml index 14c73c9..2f5715e 100644 --- a/crates/clickhousectl/Cargo.toml +++ b/crates/clickhousectl/Cargo.toml @@ -24,7 +24,7 @@ url = "2.5.8" tabled = "0.20.0" clickhouse-cloud-api = { version = "0.1.0", path = "../clickhouse-cloud-api" } uuid = { version = "1.23.0", features = ["v4"] } -is-ai-agent = "0.1.0" +is-ai-agent = "0.2.1" [dev-dependencies] tempfile = "3.27.0" diff --git a/crates/clickhousectl/src/agent_signal.rs b/crates/clickhousectl/src/agent_signal.rs index 7963ab2..de4a98b 100644 --- a/crates/clickhousectl/src/agent_signal.rs +++ b/crates/clickhousectl/src/agent_signal.rs @@ -5,30 +5,13 @@ //! Detection is delegated to the `is_ai_agent` crate, which inspects standard //! and tool-specific environment variables (e.g. `AGENT`, `CLAUDECODE`). -use is_ai_agent::AgentId; use reqwest::RequestBuilder; -/// Canonical kebab-case identifier for the detected agent, suitable for use -/// as a URL query value. Returns `None` when no agent signal is present. 
+/// Canonical kebab-case identifier for the detected agent, as defined by the +/// `is-ai-agent` crate (`AgentId::as_str`). Returns `None` when no agent +/// signal is present. pub fn detected_agent_id() -> Option<&'static str> { - is_ai_agent::detect().map(|a| agent_id_str(a.id)) -} - -fn agent_id_str(id: AgentId) -> &'static str { - match id { - AgentId::ClaudeCode => "claude-code", - AgentId::Cursor => "cursor", - AgentId::GeminiCli => "gemini-cli", - AgentId::Codex => "codex", - AgentId::Augment => "augment", - AgentId::Cline => "cline", - AgentId::OpenCode => "opencode", - AgentId::Trae => "trae", - AgentId::Goose => "goose", - AgentId::Amp => "amp", - AgentId::Devin => "devin", - AgentId::Unknown => "unknown", - } + is_ai_agent::detect().map(|a| a.id.as_str()) } /// Append the `agent=` query parameter to a request when an AI coding @@ -68,33 +51,16 @@ mod tests { use super::*; #[test] - fn maps_every_known_agent_id() { - // Exhaustive smoke check that no AgentId variant gets the empty string. - for id in [ - AgentId::ClaudeCode, - AgentId::Cursor, - AgentId::GeminiCli, - AgentId::Codex, - AgentId::Augment, - AgentId::Cline, - AgentId::OpenCode, - AgentId::Trae, - AgentId::Goose, - AgentId::Amp, - AgentId::Devin, - AgentId::Unknown, - ] { - let s = agent_id_str(id); - assert!(!s.is_empty()); - // Identifier must be URL-safe — no spaces or uppercase. - assert!(s.chars().all(|c| c.is_ascii_lowercase() || c == '-')); - } - } - - #[test] - fn claude_code_id_is_kebab_case() { - assert_eq!(agent_id_str(AgentId::ClaudeCode), "claude-code"); - assert_eq!(agent_id_str(AgentId::GeminiCli), "gemini-cli"); + fn detected_agent_id_uses_crate_canonical_id() { + // Smoke test: when the crate's CLAUDECODE detection fires, we surface + // its `AgentId::as_str` ("claude-code") verbatim. Captures the + // contract we rely on rather than reasserting the crate's table. 
+ let agent = is_ai_agent::detect_with( + |name| (name == "CLAUDECODE").then(|| "1".to_string()), + |_| false, + ) + .expect("CLAUDECODE should resolve to an Agent"); + assert_eq!(agent.id.as_str(), "claude-code"); } #[test] From b18b2931fc518f0ee4f4d0116da079fa1426a0b4 Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 20:06:55 +0100 Subject: [PATCH 09/12] Move agent attribution into User-Agent instead of per-request query params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than thread an agent search param through every clickhouse.com request (with a URL-domain gate, two helpers, and a generic extra-query-params feature on the cloud library), fold the signal into the User-Agent header that every outbound request already carries: clickhousectl/0.1.18 (agent=claude-code). RFC 7231 allows parenthesised comments in User-Agent, and this matches conventional shapes (Mozilla/5.0 etc). Detection still uses is-ai-agent. The change deletes the agent_signal module, the cloud library extra_query_params API + tests, the URL-host parser, and the per-call-site wrappers in version_manager — net -146 lines vs the previous implementation. Every reqwest::Client::builder() in the codebase already calls user_agent::user_agent(), so the new attribution flows through with zero per-call-site wiring. 
--- CLAUDE.md | 2 +- README.md | 2 +- crates/clickhouse-cloud-api/src/client.rs | 29 +----- .../clickhouse-cloud-api/tests/client_test.rs | 65 -------------- crates/clickhousectl/src/agent_signal.rs | 90 ------------------- crates/clickhousectl/src/cloud/client.rs | 11 +-- crates/clickhousectl/src/main.rs | 1 - crates/clickhousectl/src/user_agent.rs | 54 ++++++++++- .../src/version_manager/download.rs | 3 +- .../clickhousectl/src/version_manager/list.rs | 5 +- .../src/version_manager/resolve.rs | 10 +-- 11 files changed, 63 insertions(+), 209 deletions(-) delete mode 100644 crates/clickhousectl/src/agent_signal.rs diff --git a/CLAUDE.md b/CLAUDE.md index 7f8850f..4d352d2 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -122,7 +122,7 @@ cargo add -p clickhouse-cloud-api url - `src/paths.rs` handles `~/.clickhouse/` paths (global install dir); `src/init.rs` handles `.clickhouse/` paths (project-local data dir) - `local client` uses `exec()` (process replacement), so code after `cmd.exec()` only runs on failure - Error types use `thiserror` in `src/error.rs`; cloud module has its own error type wrapped as `Error::Cloud(String)` -- AI agent attribution lives in `src/agent_signal.rs`. When the CLI runs under a detected agent, an `agent=` query param is appended to requests targeting ClickHouse-owned hosts only (helper: `add_agent_query_for(builder, url)`). The cloud library accepts default query params via `Client::with_extra_query_params`, populated by `tag_with_agent` in `cloud/client.rs`. Do not extend tagging to GitHub or other third-party hosts. +- AI agent attribution is folded into `src/user_agent.rs`. When the CLI runs under a detected agent, the User-Agent string becomes `clickhousectl/ (agent=)`. Detection uses `is_ai_agent::detect`. Every outbound `reqwest::Client` already calls `user_agent::user_agent()`, so no per-call-site wiring is needed. 
- Version resolution (`version_manager/resolve.rs`) handles specs like `stable`, `lts`, `25.12`, or exact `25.12.5.44` — all resolve to an exact version + channel via GitHub API - Releases are triggered by pushing a version tag (`v0.1.3`), which runs the GitHub Actions workflow diff --git a/README.md b/README.md index 792980a..1b78d63 100644 --- a/README.md +++ b/README.md @@ -581,7 +581,7 @@ The CLI also checks for updates in the background (at most once per 24 hours) an ## AI agent attribution -When `clickhousectl` is invoked from inside a known AI coding agent (e.g. Claude Code, Cursor, Gemini CLI, Codex, Goose), it tags outbound requests to ClickHouse-owned hosts (`builds.clickhouse.com`, `packages.clickhouse.com`, `api.clickhouse.cloud`) with an `agent=` query parameter. Detection is via the [`is-ai-agent`](https://crates.io/crates/is-ai-agent) crate, which reads the standard `AGENT` env var and tool-specific signals. No tag is added for human-driven invocations, and third-party download mirrors (GitHub Releases) are never annotated. +When `clickhousectl` is invoked from inside a known AI coding agent (e.g. Claude Code, Cursor, Gemini CLI, Codex, Goose), the User-Agent header on every outbound HTTP request is extended with the agent's canonical id — for example `User-Agent: clickhousectl/0.1.18 (agent=claude-code)`. Human-driven invocations send the bare `clickhousectl/0.1.18`. Detection is via the [`is-ai-agent`](https://crates.io/crates/is-ai-agent) crate, which reads the standard `AGENT` env var and tool-specific signals. 
## Cloud integration testing diff --git a/crates/clickhouse-cloud-api/src/client.rs b/crates/clickhouse-cloud-api/src/client.rs index 5480216..ac75a6c 100644 --- a/crates/clickhouse-cloud-api/src/client.rs +++ b/crates/clickhouse-cloud-api/src/client.rs @@ -20,7 +20,6 @@ pub struct Client { http: reqwest::Client, base_url: String, auth: Auth, - extra_query_params: Vec<(String, String)>, } impl Client { @@ -42,7 +41,6 @@ impl Client { key_id: key_id.into(), key_secret: key_secret.into(), }, - extra_query_params: Vec::new(), } } @@ -57,7 +55,6 @@ impl Client { auth: Auth::Bearer { token: token.into(), }, - extra_query_params: Vec::new(), } } @@ -78,7 +75,6 @@ impl Client { key_id: key_id.into(), key_secret: key_secret.into(), }, - extra_query_params: Vec::new(), } } @@ -97,26 +93,9 @@ impl Client { auth: Auth::Bearer { token: token.into(), }, - extra_query_params: Vec::new(), } } - /// Attach extra query parameters that should be appended to every request - /// this client makes. Useful for callers that want to surface a CLI- or - /// runtime-level signal (e.g. an `agent` tag) to the API for analytics. - /// - /// Multiple calls accumulate; existing params are preserved. - pub fn with_extra_query_params(mut self, params: I) -> Self - where - I: IntoIterator, - K: Into, - V: Into, - { - self.extra_query_params - .extend(params.into_iter().map(|(k, v)| (k.into(), v.into()))); - self - } - /// Replace the Bearer token without rebuilding the client. /// /// Useful for refreshing an expired OAuth token. 
@@ -134,17 +113,13 @@ impl Client { } fn request(&self, method: reqwest::Method, path: &str) -> reqwest::RequestBuilder { - let mut builder = self + let builder = self .http .request(method, format!("{}{}", self.base_url, path)); - builder = match &self.auth { + match &self.auth { Auth::Basic { key_id, key_secret } => builder.basic_auth(key_id, Some(key_secret)), Auth::Bearer { token } => builder.bearer_auth(token), - }; - if !self.extra_query_params.is_empty() { - builder = builder.query(&self.extra_query_params); } - builder } /// Get list of available organizations diff --git a/crates/clickhouse-cloud-api/tests/client_test.rs b/crates/clickhouse-cloud-api/tests/client_test.rs index 5f0f675..0b24396 100644 --- a/crates/clickhouse-cloud-api/tests/client_test.rs +++ b/crates/clickhouse-cloud-api/tests/client_test.rs @@ -2786,68 +2786,3 @@ async fn default_base_url_is_production() { // but we can verify the client is constructable without panicking. let _client = Client::new("key", "secret"); } - -// =========================================================================== -// Extra query params (e.g. agent attribution tag) -// =========================================================================== - -#[tokio::test] -async fn extra_query_params_are_applied_to_every_request() { - let mock_server = MockServer::start().await; - - Mock::given(method("GET")) - .and(path("/v1/organizations")) - .and(query_param("agent", "claude-code")) - .respond_with(ok_json(serde_json::json!([]))) - .mount(&mock_server) - .await; - - let client = Client::with_base_url(mock_server.uri(), "key", "secret") - .with_extra_query_params([("agent", "claude-code")]); - let resp = client.organization_get_list().await.unwrap(); - assert_eq!(resp.result.unwrap().len(), 0); -} - -#[tokio::test] -async fn extra_query_params_coexist_with_endpoint_query_params() { - // The library already attaches `from_date`/`to_date` to usageCost — make - // sure our default params don't clobber them. 
- let mock_server = MockServer::start().await; - - Mock::given(method("GET")) - .and(path("/v1/organizations/org-1/usageCost")) - .and(query_param("from_date", "2024-01-01")) - .and(query_param("to_date", "2024-01-31")) - .and(query_param("agent", "claude-code")) - .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ - "status": 200, - "result": { - "grandTotalCHC": 0.0, - "costsByDate": [] - } - }))) - .mount(&mock_server) - .await; - - let client = Client::with_base_url(mock_server.uri(), "key", "secret") - .with_extra_query_params([("agent", "claude-code")]); - client - .usage_cost_get("org-1", "2024-01-01", "2024-01-31", &[]) - .await - .unwrap(); -} - -#[tokio::test] -async fn empty_extra_query_params_keep_url_clean() { - // Client built with no extras should not append a stray empty `?`. - let mock_server = MockServer::start().await; - - Mock::given(method("GET")) - .and(path("/v1/organizations")) - .respond_with(ok_json(serde_json::json!([]))) - .mount(&mock_server) - .await; - - let client = Client::with_base_url(mock_server.uri(), "key", "secret"); - client.organization_get_list().await.unwrap(); -} diff --git a/crates/clickhousectl/src/agent_signal.rs b/crates/clickhousectl/src/agent_signal.rs deleted file mode 100644 index de4a98b..0000000 --- a/crates/clickhousectl/src/agent_signal.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Detect when the CLI is being driven by an AI coding agent and surface that -//! signal as an `agent` query parameter on outbound clickhouse.com / -//! clickhouse.cloud requests, so server-side analytics can attribute usage. -//! -//! Detection is delegated to the `is_ai_agent` crate, which inspects standard -//! and tool-specific environment variables (e.g. `AGENT`, `CLAUDECODE`). - -use reqwest::RequestBuilder; - -/// Canonical kebab-case identifier for the detected agent, as defined by the -/// `is-ai-agent` crate (`AgentId::as_str`). Returns `None` when no agent -/// signal is present. 
-pub fn detected_agent_id() -> Option<&'static str> { - is_ai_agent::detect().map(|a| a.id.as_str()) -} - -/// Append the `agent=` query parameter to a request when an AI coding -/// agent is detected. Pass-through when no agent is present. -pub fn add_agent_query(builder: RequestBuilder) -> RequestBuilder { - match detected_agent_id() { - Some(id) => builder.query(&[("agent", id)]), - None => builder, - } -} - -/// Like `add_agent_query`, but only annotates requests targeting -/// ClickHouse-owned hosts (so we don't leak the signal to GitHub or other -/// third-party download mirrors). -pub fn add_agent_query_for(builder: RequestBuilder, url: &str) -> RequestBuilder { - if is_clickhouse_url(url) { - add_agent_query(builder) - } else { - builder - } -} - -fn is_clickhouse_url(url: &str) -> bool { - let host = match url.split_once("://") { - Some((_, rest)) => rest.split('/').next().unwrap_or(""), - None => url, - }; - let host = host.split(':').next().unwrap_or(host); - host == "clickhouse.com" - || host == "clickhouse.cloud" - || host.ends_with(".clickhouse.com") - || host.ends_with(".clickhouse.cloud") -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn detected_agent_id_uses_crate_canonical_id() { - // Smoke test: when the crate's CLAUDECODE detection fires, we surface - // its `AgentId::as_str` ("claude-code") verbatim. Captures the - // contract we rely on rather than reasserting the crate's table. 
- let agent = is_ai_agent::detect_with( - |name| (name == "CLAUDECODE").then(|| "1".to_string()), - |_| false, - ) - .expect("CLAUDECODE should resolve to an Agent"); - assert_eq!(agent.id.as_str(), "claude-code"); - } - - #[test] - fn detects_clickhouse_owned_hosts() { - assert!(is_clickhouse_url("https://builds.clickhouse.com/master/amd64/clickhouse")); - assert!(is_clickhouse_url( - "https://packages.clickhouse.com/tgz/stable/clickhouse-common-static-25.12.9.61-amd64.tgz" - )); - assert!(is_clickhouse_url("https://api.clickhouse.cloud/v1/organizations")); - assert!(is_clickhouse_url("https://clickhouse.com/docs/cloud/")); - } - - #[test] - fn rejects_non_clickhouse_hosts() { - assert!(!is_clickhouse_url( - "https://github.com/ClickHouse/ClickHouse/releases/download/v25.12.5.44-stable/clickhouse-macos-aarch64" - )); - assert!(!is_clickhouse_url("https://api.github.com/repos/ClickHouse/ClickHouse/releases")); - } - - #[test] - fn rejects_lookalike_hosts() { - // Suffix match must be a true subdomain, not a substring of an attacker-controlled host. - assert!(!is_clickhouse_url("https://evil-clickhouse.com/path")); - assert!(!is_clickhouse_url("https://clickhouse.com.attacker.com/x")); - } -} diff --git a/crates/clickhousectl/src/cloud/client.rs b/crates/clickhousectl/src/cloud/client.rs index 7c4d52f..0679d6b 100644 --- a/crates/clickhousectl/src/cloud/client.rs +++ b/crates/clickhousectl/src/cloud/client.rs @@ -181,15 +181,6 @@ fn lib_base_url(cli_base_url: &str) -> String { .to_string() } -/// Tag every request with `agent=` when an AI coding agent is driving the -/// CLI, so server-side analytics can attribute usage. No-op for human users. 
-fn tag_with_agent(client: clickhouse_cloud_api::Client) -> clickhouse_cloud_api::Client { - match crate::agent_signal::detected_agent_id() { - Some(id) => client.with_extra_query_params([("agent", id)]), - None => client, - } -} - impl CloudClient { pub fn new( api_key: Option<&str>, @@ -217,7 +208,7 @@ impl CloudClient { }; Ok(Self { - lib_client: tag_with_agent(lib_client), + lib_client, auth_mode, auth_source: resolved.source, base_url: resolved.base_url, diff --git a/crates/clickhousectl/src/main.rs b/crates/clickhousectl/src/main.rs index b0e6f3e..f34f582 100644 --- a/crates/clickhousectl/src/main.rs +++ b/crates/clickhousectl/src/main.rs @@ -1,4 +1,3 @@ -mod agent_signal; mod cli; mod cloud; mod error; diff --git a/crates/clickhousectl/src/user_agent.rs b/crates/clickhousectl/src/user_agent.rs index 1155372..bf402c8 100644 --- a/crates/clickhousectl/src/user_agent.rs +++ b/crates/clickhousectl/src/user_agent.rs @@ -1,4 +1,56 @@ /// Returns the canonical user-agent string for all outbound HTTP requests. +/// +/// When invoked under a known AI coding agent (Claude Code, Cursor, OpenAI +/// Codex, Gemini CLI, Goose, Devin, etc.), the agent's canonical id is +/// appended in parentheses — e.g. `clickhousectl/0.1.18 (agent=claude-code)` +/// — so server-side analytics can attribute usage. RFC 7231 allows comments +/// in the User-Agent header, and parenthesised key=value pairs are a +/// conventional shape (cf. browsers' `Mozilla/5.0 (Windows NT 10.0; ...)`). pub fn user_agent() -> String { - format!("clickhousectl/{}", env!("CARGO_PKG_VERSION")) + let base = format!("clickhousectl/{}", env!("CARGO_PKG_VERSION")); + match is_ai_agent::detect() { + Some(agent) => format!("{} (agent={})", base, agent.id.as_str()), + None => base, + } +} + +/// Build a User-Agent string from an explicit (test-injected) detected agent. +/// Mirrors `user_agent` but lets tests exercise both branches deterministically. 
+#[cfg(test)] +fn user_agent_from(detected: Option<&str>) -> String { + let base = format!("clickhousectl/{}", env!("CARGO_PKG_VERSION")); + match detected { + Some(id) => format!("{} (agent={})", base, id), + None => base, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn human_invocation_is_just_name_and_version() { + assert_eq!( + user_agent_from(None), + format!("clickhousectl/{}", env!("CARGO_PKG_VERSION")) + ); + } + + #[test] + fn agent_invocation_appends_paren_comment() { + assert_eq!( + user_agent_from(Some("claude-code")), + format!("clickhousectl/{} (agent=claude-code)", env!("CARGO_PKG_VERSION")) + ); + } + + #[test] + fn live_user_agent_starts_with_canonical_prefix() { + // Live process check: we can't control the env reliably across CI + // hosts, but the prefix invariant must always hold. + let ua = user_agent(); + let prefix = format!("clickhousectl/{}", env!("CARGO_PKG_VERSION")); + assert!(ua == prefix || ua.starts_with(&format!("{prefix} ("))); + } } diff --git a/crates/clickhousectl/src/version_manager/download.rs b/crates/clickhousectl/src/version_manager/download.rs index d8d2da1..5e2f604 100644 --- a/crates/clickhousectl/src/version_manager/download.rs +++ b/crates/clickhousectl/src/version_manager/download.rs @@ -21,7 +21,8 @@ pub async fn download_url(url: &str, dest_path: &Path) -> Result<()> { .user_agent(crate::user_agent::user_agent()) .build()?; - let response = crate::agent_signal::add_agent_query_for(client.get(url), url) + let response = client + .get(url) .send() .await? 
.error_for_status() diff --git a/crates/clickhousectl/src/version_manager/list.rs b/crates/clickhousectl/src/version_manager/list.rs index 84574b9..46e1328 100644 --- a/crates/clickhousectl/src/version_manager/list.rs +++ b/crates/clickhousectl/src/version_manager/list.rs @@ -130,10 +130,7 @@ pub async fn list_available_versions_from_builds() -> Result> { for mm in (1..=12).rev() { let version_path = format!("{}.{}", yy, mm); let url = builds_probe_url(&version_path, &platform); - match crate::agent_signal::add_agent_query_for(client.head(&url), &url) - .send() - .await - { + match client.head(&url).send().await { Ok(resp) if resp.status().is_success() => { available.push(version_path); } diff --git a/crates/clickhousectl/src/version_manager/resolve.rs b/crates/clickhousectl/src/version_manager/resolve.rs index d7af9c5..4c334d7 100644 --- a/crates/clickhousectl/src/version_manager/resolve.rs +++ b/crates/clickhousectl/src/version_manager/resolve.rs @@ -83,10 +83,7 @@ async fn resolve_major(major: u32, platform: &Platform) -> Result { highest_available = Some(minor); } @@ -226,10 +223,7 @@ async fn probe_builds(version_path: &str, platform: &Platform) -> bool { Err(_) => return false, }; - match crate::agent_signal::add_agent_query_for(client.head(&url), &url) - .send() - .await - { + match client.head(&url).send().await { Ok(resp) => resp.status().is_success(), Err(_) => false, } From 91fda9f761731ff77e3e4b36891fcf19430dc8b4 Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 20:11:34 +0100 Subject: [PATCH 10/12] Drop README and CLAUDE.md notes about agent attribution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's now a one-line implementation detail of an analytics signal — not user-facing functionality, not configurable, not surprising for a future reader to understand from the user_agent.rs source. Doesn't earn a documentation entry. 
--- CLAUDE.md | 1 - README.md | 4 ---- 2 files changed, 5 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 4d352d2..e5bc5f8 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -122,7 +122,6 @@ cargo add -p clickhouse-cloud-api url - `src/paths.rs` handles `~/.clickhouse/` paths (global install dir); `src/init.rs` handles `.clickhouse/` paths (project-local data dir) - `local client` uses `exec()` (process replacement), so code after `cmd.exec()` only runs on failure - Error types use `thiserror` in `src/error.rs`; cloud module has its own error type wrapped as `Error::Cloud(String)` -- AI agent attribution is folded into `src/user_agent.rs`. When the CLI runs under a detected agent, the User-Agent string becomes `clickhousectl/ (agent=)`. Detection uses `is_ai_agent::detect`. Every outbound `reqwest::Client` already calls `user_agent::user_agent()`, so no per-call-site wiring is needed. - Version resolution (`version_manager/resolve.rs`) handles specs like `stable`, `lts`, `25.12`, or exact `25.12.5.44` — all resolve to an exact version + channel via GitHub API - Releases are triggered by pushing a version tag (`v0.1.3`), which runs the GitHub Actions workflow diff --git a/README.md b/README.md index 1b78d63..4ebfc35 100644 --- a/README.md +++ b/README.md @@ -579,10 +579,6 @@ clickhousectl update --check The CLI also checks for updates in the background (at most once per 24 hours) and displays a notice when a newer version is available. -## AI agent attribution - -When `clickhousectl` is invoked from inside a known AI coding agent (e.g. Claude Code, Cursor, Gemini CLI, Codex, Goose), the User-Agent header on every outbound HTTP request is extended with the agent's canonical id — for example `User-Agent: clickhousectl/0.1.18 (agent=claude-code)`. Human-driven invocations send the bare `clickhousectl/0.1.18`. Detection is via the [`is-ai-agent`](https://crates.io/crates/is-ai-agent) crate, which reads the standard `AGENT` env var and tool-specific signals. 
- ## Cloud integration testing Cloud API integration is tested against a real ClickHouse Cloud workspace via the library crate. All changes to cloud commands must pass CI testing before merge. Tests are in [`crates/clickhouse-cloud-api/tests/integration_test.rs`](crates/clickhouse-cloud-api/tests/integration_test.rs). From b0f232ee0a387af13bec46e312e45883d63a5941 Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 20:32:59 +0100 Subject: [PATCH 11/12] Bump rustls-webpki to 0.103.13 and rand to 0.9.4 Pull in upstream security fixes flagged by Dependabot. Both are transitive dependencies; lockfile-only update, no API or behaviour changes. Build and full workspace tests pass. --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66b22b2..1d1f6f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1369,9 +1369,9 @@ checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" [[package]] name = "rand" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" dependencies = [ "rand_chacha", "rand_core", @@ -1614,9 +1614,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" dependencies = [ "aws-lc-rs", "ring", From 5520efb3c76bdd6f0d16396223a2129f18ddaebc Mon Sep 17 00:00:00 2001 From: sdairs Date: Mon, 4 May 2026 20:36:34 +0100 Subject: [PATCH 12/12] Bump clickhousectl to 0.2.0 Co-Authored-By: Claude Opus 4.7 (1M context) --- Cargo.lock | 2 +- 
crates/clickhousectl/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d1f6f1..17ef637 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -257,7 +257,7 @@ dependencies = [ [[package]] name = "clickhousectl" -version = "0.1.18" +version = "0.2.0" dependencies = [ "chrono", "clap", diff --git a/crates/clickhousectl/Cargo.toml b/crates/clickhousectl/Cargo.toml index 2f5715e..776c9dd 100644 --- a/crates/clickhousectl/Cargo.toml +++ b/crates/clickhousectl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "clickhousectl" -version = "0.1.18" +version = "0.2.0" edition = "2024" [dependencies]