From 90be78ab40cb66bedc5d30cdc064c2b50c98df32 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Tue, 14 Apr 2026 07:44:32 +0200 Subject: [PATCH 01/21] fix(pg-delta): declarative-sync-no-declarative-dir-set (#5078) * feat(declarative): add tests for skipping config updates when PgDelta is enabled - These tests verify that the configuration remains unchanged when PgDelta is enabled, ensuring the declarative directory is the source of truth. - Updated the WriteDeclarativeSchemas function to reflect the new behavior regarding PgDelta configuration. * fix(declarative): DSL change due to upgrade --- internal/db/declarative/declarative.go | 7 +-- internal/db/declarative/declarative_test.go | 60 +++++++++++++++++++ internal/db/diff/templates/pgdelta.ts | 9 ++- .../templates/pgdelta_declarative_export.ts | 17 +++--- 4 files changed, 81 insertions(+), 12 deletions(-) diff --git a/internal/db/declarative/declarative.go b/internal/db/declarative/declarative.go index 2a0454d01..f7c8ca002 100644 --- a/internal/db/declarative/declarative.go +++ b/internal/db/declarative/declarative.go @@ -235,10 +235,9 @@ func WriteDeclarativeSchemas(output diff.DeclarativeOutput, fsys afero.Fs) error return err } } - // When pg-delta has its own config section, the declarative path is the single - // source of truth there; do not overwrite [db.migrations] schema_paths. - if utils.IsPgDeltaEnabled() && utils.Config.Experimental.PgDelta != nil && - len(utils.Config.Experimental.PgDelta.DeclarativeSchemaPath) > 0 { + // When pg-delta is enabled, the declarative directory (default or configured) + // is the source of truth; do not overwrite [db.migrations] schema_paths. 
+ if utils.IsPgDeltaEnabled() { return nil } utils.Config.Db.Migrations.SchemaPaths = []string{ diff --git a/internal/db/declarative/declarative_test.go b/internal/db/declarative/declarative_test.go index 73b6f473a..229e6ffe7 100644 --- a/internal/db/declarative/declarative_test.go +++ b/internal/db/declarative/declarative_test.go @@ -48,6 +48,34 @@ func TestWriteDeclarativeSchemas(t *testing.T) { assert.Contains(t, string(cfg), `"database"`) } +func TestWriteDeclarativeSchemasSkipsConfigUpdateWhenPgDeltaEnabled(t *testing.T) { + fsys := afero.NewMemMapFs() + originalConfig := "[db]\n" + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte(originalConfig), 0644)) + original := utils.Config.Experimental.PgDelta + utils.Config.Experimental.PgDelta = &config.PgDeltaConfig{Enabled: true} + t.Cleanup(func() { + utils.Config.Experimental.PgDelta = original + }) + + output := diff.DeclarativeOutput{ + Files: []diff.DeclarativeFile{ + {Path: "schemas/public/tables/users.sql", SQL: "create table users(id bigint);"}, + }, + } + + err := WriteDeclarativeSchemas(output, fsys) + require.NoError(t, err) + + users, err := afero.ReadFile(fsys, filepath.Join(utils.DeclarativeDir, "schemas", "public", "tables", "users.sql")) + require.NoError(t, err) + assert.Equal(t, "create table users(id bigint);", string(users)) + + cfg, err := afero.ReadFile(fsys, utils.ConfigPath) + require.NoError(t, err) + assert.Equal(t, originalConfig, string(cfg)) +} + func TestTryCacheMigrationsCatalogWritesPrefixedCache(t *testing.T) { fsys := afero.NewMemMapFs() original := utils.Config.Experimental.PgDelta @@ -146,6 +174,38 @@ func TestWriteDeclarativeSchemasUsesConfiguredDir(t *testing.T) { assert.Contains(t, string(cfg), `db/decl`) } +func TestWriteDeclarativeSchemasSkipsConfigUpdateForPgDeltaCustomDir(t *testing.T) { + fsys := afero.NewMemMapFs() + originalConfig := "[db]\n" + require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte(originalConfig), 0644)) + original := 
utils.Config.Experimental.PgDelta + utils.Config.Experimental.PgDelta = &config.PgDeltaConfig{ + Enabled: true, + DeclarativeSchemaPath: filepath.Join(utils.SupabaseDirPath, "db", "decl"), + } + t.Cleanup(func() { + utils.Config.Experimental.PgDelta = original + }) + + output := diff.DeclarativeOutput{ + Files: []diff.DeclarativeFile{ + {Path: "cluster/roles.sql", SQL: "create role app;"}, + }, + } + + err := WriteDeclarativeSchemas(output, fsys) + require.NoError(t, err) + + rolesPath := filepath.Join(utils.SupabaseDirPath, "db", "decl", "cluster", "roles.sql") + roles, err := afero.ReadFile(fsys, rolesPath) + require.NoError(t, err) + assert.Equal(t, "create role app;", string(roles)) + + cfg, err := afero.ReadFile(fsys, utils.ConfigPath) + require.NoError(t, err) + assert.Equal(t, originalConfig, string(cfg)) +} + func TestWriteDeclarativeSchemasRejectsUnsafePath(t *testing.T) { // Export paths must stay within supabase/declarative to prevent traversal. fsys := afero.NewMemMapFs() diff --git a/internal/db/diff/templates/pgdelta.ts b/internal/db/diff/templates/pgdelta.ts index 37995c491..234c91ab0 100644 --- a/internal/db/diff/templates/pgdelta.ts +++ b/internal/db/diff/templates/pgdelta.ts @@ -21,7 +21,14 @@ const target = Deno.env.get("TARGET"); const includedSchemas = Deno.env.get("INCLUDED_SCHEMAS"); if (includedSchemas) { - supabase.filter = { schema: includedSchemas.split(",") }; + const schemas = includedSchemas.split(","); + const schemaFilter = { + or: [{ "*/schema": schemas }, { "schema/name": schemas }], + }; + // CompositionPattern `and` is valid FilterDSL; Deno's structural typing is strict on `or` branches. 
+ supabase.filter = { + and: [supabase.filter!, schemaFilter], + } as typeof supabase.filter; } const formatOptionsRaw = Deno.env.get("FORMAT_OPTIONS"); diff --git a/internal/db/diff/templates/pgdelta_declarative_export.ts b/internal/db/diff/templates/pgdelta_declarative_export.ts index cdb59924f..dead372a7 100644 --- a/internal/db/diff/templates/pgdelta_declarative_export.ts +++ b/internal/db/diff/templates/pgdelta_declarative_export.ts @@ -22,20 +22,23 @@ async function resolveInput(ref: string | undefined) { const source = Deno.env.get("SOURCE"); const target = Deno.env.get("TARGET"); supabase.filter = { - // Also allow dropped extensions from migrations to be capted in the declarative schema export + // Also allow dropped extensions from migrations to be captured in the declarative schema export // TODO: fix upstream bug into pgdelta supabase integration or: [ - ...supabase.filter.or, - { type: "extension", operation: "drop", scope: "object" }, + ...supabase.filter!.or!, + { objectType: "extension", operation: "drop", scope: "object" }, ], }; const includedSchemas = Deno.env.get("INCLUDED_SCHEMAS"); if (includedSchemas) { - const schemaFilter = { schema: includedSchemas.split(",") }; - supabase.filter = supabase.filter - ? 
{ and: [supabase.filter, schemaFilter] } - : schemaFilter; + const schemas = includedSchemas.split(","); + const schemaFilter = { + or: [{ "*/schema": schemas }, { "schema/name": schemas }], + }; + supabase.filter = { + and: [supabase.filter!, schemaFilter], + } as unknown as typeof supabase.filter; } const formatOptionsRaw = Deno.env.get("FORMAT_OPTIONS"); From a1942ee75ee94b306c2635cc3dccdd5f1373c7a3 Mon Sep 17 00:00:00 2001 From: fadymak Date: Tue, 14 Apr 2026 10:46:29 +0200 Subject: [PATCH 02/21] feat(auth): add support for configuring passkeys and webauthn (#5077) --- pkg/config/auth.go | 38 +++++++++++++++------- pkg/config/auth_test.go | 43 +++++++++++++++--------- pkg/config/config.go | 37 ++++++++++++--------- pkg/config/config_test.go | 56 ++++++++++++++++++++++++++++---- pkg/config/templates/config.toml | 3 ++ 5 files changed, 128 insertions(+), 49 deletions(-) diff --git a/pkg/config/auth.go b/pkg/config/auth.go index 82c708e37..c1795c897 100644 --- a/pkg/config/auth.go +++ b/pkg/config/auth.go @@ -163,6 +163,7 @@ type ( SigningKeysPath string `toml:"signing_keys_path" json:"signing_keys_path"` SigningKeys []JWK `toml:"-" json:"-"` Passkey *passkey `toml:"passkey" json:"passkey"` + Webauthn *webauthn `toml:"webauthn" json:"webauthn"` RateLimit rateLimit `toml:"rate_limit" json:"rate_limit"` Captcha *captcha `toml:"captcha" json:"captcha"` @@ -380,7 +381,10 @@ type ( } passkey struct { - Enabled bool `toml:"enabled" json:"enabled"` + Enabled bool `toml:"enabled" json:"enabled"` + } + + webauthn struct { RpDisplayName string `toml:"rp_display_name" json:"rp_display_name"` RpId string `toml:"rp_id" json:"rp_id"` RpOrigins []string `toml:"rp_origins" json:"rp_origins"` @@ -418,6 +422,9 @@ func (a *auth) ToUpdateAuthConfigBody() v1API.UpdateAuthConfigBody { if a.Passkey != nil { a.Passkey.toAuthConfigBody(&body) } + if a.Webauthn != nil { + a.Webauthn.toAuthConfigBody(&body) + } a.Hook.toAuthConfigBody(&body) a.MFA.toAuthConfigBody(&body) 
a.Sessions.toAuthConfigBody(&body) @@ -442,6 +449,7 @@ func (a *auth) FromRemoteAuthConfig(remoteConfig v1API.AuthConfigResponse) { prc := ValOrDefault(remoteConfig.PasswordRequiredCharacters, "") a.PasswordRequirements = NewPasswordRequirement(v1API.UpdateAuthConfigBodyPasswordRequiredCharacters(prc)) a.Passkey.fromAuthConfig(remoteConfig) + a.Webauthn.fromAuthConfig(remoteConfig) a.RateLimit.fromAuthConfig(remoteConfig) if s := a.Email.Smtp; s != nil && s.Enabled { a.RateLimit.EmailSent = cast.IntToUint(ValOrDefault(remoteConfig.RateLimitEmailSent, 0)) @@ -502,11 +510,7 @@ func (c *captcha) fromAuthConfig(remoteConfig v1API.AuthConfigResponse) { } func (p passkey) toAuthConfigBody(body *v1API.UpdateAuthConfigBody) { - if body.PasskeyEnabled = cast.Ptr(p.Enabled); p.Enabled { - body.WebauthnRpDisplayName = nullable.NewNullableWithValue(p.RpDisplayName) - body.WebauthnRpId = nullable.NewNullableWithValue(p.RpId) - body.WebauthnRpOrigins = nullable.NewNullableWithValue(strings.Join(p.RpOrigins, ",")) - } + body.PasskeyEnabled = cast.Ptr(p.Enabled) } func (p *passkey) fromAuthConfig(remoteConfig v1API.AuthConfigResponse) { @@ -514,15 +518,25 @@ func (p *passkey) fromAuthConfig(remoteConfig v1API.AuthConfigResponse) { if p == nil { return } - // Ignore disabled passkey fields to minimise config diff - if p.Enabled { - p.RpDisplayName = ValOrDefault(remoteConfig.WebauthnRpDisplayName, "") - p.RpId = ValOrDefault(remoteConfig.WebauthnRpId, "") - p.RpOrigins = strToArr(ValOrDefault(remoteConfig.WebauthnRpOrigins, "")) - } p.Enabled = remoteConfig.PasskeyEnabled } +func (w webauthn) toAuthConfigBody(body *v1API.UpdateAuthConfigBody) { + body.WebauthnRpDisplayName = nullable.NewNullableWithValue(w.RpDisplayName) + body.WebauthnRpId = nullable.NewNullableWithValue(w.RpId) + body.WebauthnRpOrigins = nullable.NewNullableWithValue(strings.Join(w.RpOrigins, ",")) +} + +func (w *webauthn) fromAuthConfig(remoteConfig v1API.AuthConfigResponse) { + // When local config is not set, 
we assume platform defaults should not change + if w == nil { + return + } + w.RpDisplayName = ValOrDefault(remoteConfig.WebauthnRpDisplayName, "") + w.RpId = ValOrDefault(remoteConfig.WebauthnRpId, "") + w.RpOrigins = strToArr(ValOrDefault(remoteConfig.WebauthnRpOrigins, "")) +} + func (h hook) toAuthConfigBody(body *v1API.UpdateAuthConfigBody) { // When local config is not set, we assume platform defaults should not change if hook := h.BeforeUserCreated; hook != nil { diff --git a/pkg/config/auth_test.go b/pkg/config/auth_test.go index 65f0066da..61ba5b429 100644 --- a/pkg/config/auth_test.go +++ b/pkg/config/auth_test.go @@ -215,8 +215,8 @@ func TestCaptchaDiff(t *testing.T) { func TestPasskeyConfigMapping(t *testing.T) { t.Run("serializes passkey config to update body", func(t *testing.T) { c := newWithDefaults() - c.Passkey = &passkey{ - Enabled: true, + c.Passkey = &passkey{Enabled: true} + c.Webauthn = &webauthn{ RpDisplayName: "Supabase CLI", RpId: "localhost", RpOrigins: []string{ @@ -235,14 +235,9 @@ func TestPasskeyConfigMapping(t *testing.T) { assert.Equal(t, "http://127.0.0.1:3000,https://localhost:3000", ValOrDefault(body.WebauthnRpOrigins, "")) }) - t.Run("does not serialize rp fields when passkey is disabled", func(t *testing.T) { + t.Run("does not serialize rp fields when webauthn is undefined", func(t *testing.T) { c := newWithDefaults() - c.Passkey = &passkey{ - Enabled: false, - RpDisplayName: "Supabase CLI", - RpId: "localhost", - RpOrigins: []string{"http://127.0.0.1:3000"}, - } + c.Passkey = &passkey{Enabled: false} // Run test body := c.ToUpdateAuthConfigBody() // Check result @@ -257,12 +252,27 @@ func TestPasskeyConfigMapping(t *testing.T) { assert.Error(t, err) }) - t.Run("hydrates passkey config from remote", func(t *testing.T) { + t.Run("serializes webauthn fields independently of passkey", func(t *testing.T) { c := newWithDefaults() - c.Passkey = &passkey{ - Enabled: true, + c.Webauthn = &webauthn{ + RpDisplayName: "Supabase CLI", + 
RpId: "localhost", + RpOrigins: []string{"http://127.0.0.1:3000"}, } // Run test + body := c.ToUpdateAuthConfigBody() + // Check result + assert.Nil(t, body.PasskeyEnabled) + assert.Equal(t, "Supabase CLI", ValOrDefault(body.WebauthnRpDisplayName, "")) + assert.Equal(t, "localhost", ValOrDefault(body.WebauthnRpId, "")) + assert.Equal(t, "http://127.0.0.1:3000", ValOrDefault(body.WebauthnRpOrigins, "")) + }) + + t.Run("hydrates passkey and webauthn config from remote", func(t *testing.T) { + c := newWithDefaults() + c.Passkey = &passkey{Enabled: true} + c.Webauthn = &webauthn{} + // Run test c.FromRemoteAuthConfig(v1API.AuthConfigResponse{ PasskeyEnabled: true, WebauthnRpDisplayName: nullable.NewNullableWithValue("Supabase CLI"), @@ -272,12 +282,14 @@ func TestPasskeyConfigMapping(t *testing.T) { // Check result if assert.NotNil(t, c.Passkey) { assert.True(t, c.Passkey.Enabled) - assert.Equal(t, "Supabase CLI", c.Passkey.RpDisplayName) - assert.Equal(t, "localhost", c.Passkey.RpId) + } + if assert.NotNil(t, c.Webauthn) { + assert.Equal(t, "Supabase CLI", c.Webauthn.RpDisplayName) + assert.Equal(t, "localhost", c.Webauthn.RpId) assert.Equal(t, []string{ "http://127.0.0.1:3000", "https://localhost:3000", - }, c.Passkey.RpOrigins) + }, c.Webauthn.RpOrigins) } }) @@ -292,6 +304,7 @@ func TestPasskeyConfigMapping(t *testing.T) { }) // Check result assert.Nil(t, c.Passkey) + assert.Nil(t, c.Webauthn) }) } diff --git a/pkg/config/config.go b/pkg/config/config.go index 90d81741b..2aa7f99f2 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -262,9 +262,13 @@ func (a *auth) Clone() auth { } if copy.Passkey != nil { passkey := *a.Passkey - passkey.RpOrigins = slices.Clone(a.Passkey.RpOrigins) copy.Passkey = &passkey } + if copy.Webauthn != nil { + webauthn := *a.Webauthn + webauthn.RpOrigins = slices.Clone(a.Webauthn.RpOrigins) + copy.Webauthn = &webauthn + } copy.External = maps.Clone(a.External) if a.Email.Smtp != nil { mailer := *a.Email.Smtp @@ -921,21 +925,22 
@@ func (c *config) Validate(fsys fs.FS) error { return errors.Errorf("failed to decode signing keys: %w", err) } } - if c.Auth.Passkey != nil { - if c.Auth.Passkey.Enabled { - if len(c.Auth.Passkey.RpId) == 0 { - return errors.New("Missing required field in config: auth.passkey.rp_id") - } - if len(c.Auth.Passkey.RpOrigins) == 0 { - return errors.New("Missing required field in config: auth.passkey.rp_origins") - } - if err := assertEnvLoaded(c.Auth.Passkey.RpId); err != nil { - return errors.Errorf("Invalid config for auth.passkey.rp_id: %v", err) - } - for i, origin := range c.Auth.Passkey.RpOrigins { - if err := assertEnvLoaded(origin); err != nil { - return errors.Errorf("Invalid config for auth.passkey.rp_origins[%d]: %v", i, err) - } + if c.Auth.Passkey != nil && c.Auth.Passkey.Enabled { + if c.Auth.Webauthn == nil { + return errors.New("Missing required config section: auth.webauthn (required when auth.passkey.enabled is true)") + } + if len(c.Auth.Webauthn.RpId) == 0 { + return errors.New("Missing required field in config: auth.webauthn.rp_id") + } + if len(c.Auth.Webauthn.RpOrigins) == 0 { + return errors.New("Missing required field in config: auth.webauthn.rp_origins") + } + if err := assertEnvLoaded(c.Auth.Webauthn.RpId); err != nil { + return errors.Errorf("Invalid config for auth.webauthn.rp_id: %v", err) + } + for i, origin := range c.Auth.Webauthn.RpOrigins { + if err := assertEnvLoaded(origin); err != nil { + return errors.Errorf("Invalid config for auth.webauthn.rp_origins[%d]: %v", i, err) } } } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 695733116..f019b7cbc 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -74,7 +74,7 @@ func TestConfigParsing(t *testing.T) { // Run test assert.Error(t, config.Load("", fsys)) }) - t.Run("config file with passkey settings", func(t *testing.T) { + t.Run("config file with passkey and webauthn settings", func(t *testing.T) { config := NewConfig() fsys := fs.MapFS{ 
"supabase/config.toml": &fs.MapFile{Data: []byte(` @@ -83,6 +83,7 @@ enabled = true site_url = "http://127.0.0.1:3000" [auth.passkey] enabled = true +[auth.webauthn] rp_display_name = "Supabase CLI" rp_id = "localhost" rp_origins = ["http://127.0.0.1:3000", "https://localhost:3000"] @@ -93,15 +94,56 @@ rp_origins = ["http://127.0.0.1:3000", "https://localhost:3000"] // Check result if assert.NotNil(t, config.Auth.Passkey) { assert.True(t, config.Auth.Passkey.Enabled) - assert.Equal(t, "Supabase CLI", config.Auth.Passkey.RpDisplayName) - assert.Equal(t, "localhost", config.Auth.Passkey.RpId) + } + if assert.NotNil(t, config.Auth.Webauthn) { + assert.Equal(t, "Supabase CLI", config.Auth.Webauthn.RpDisplayName) + assert.Equal(t, "localhost", config.Auth.Webauthn.RpId) assert.Equal(t, []string{ "http://127.0.0.1:3000", "https://localhost:3000", - }, config.Auth.Passkey.RpOrigins) + }, config.Auth.Webauthn.RpOrigins) } }) + t.Run("webauthn section without passkey loads successfully", func(t *testing.T) { + config := NewConfig() + fsys := fs.MapFS{ + "supabase/config.toml": &fs.MapFile{Data: []byte(` +[auth] +enabled = true +site_url = "http://127.0.0.1:3000" +[auth.webauthn] +rp_display_name = "Supabase CLI" +rp_id = "localhost" +rp_origins = ["http://127.0.0.1:3000"] +`)}, + } + // Run test + assert.NoError(t, config.Load("", fsys)) + // Check result + assert.Nil(t, config.Auth.Passkey) + if assert.NotNil(t, config.Auth.Webauthn) { + assert.Equal(t, "localhost", config.Auth.Webauthn.RpId) + } + }) + + t.Run("passkey enabled requires webauthn section", func(t *testing.T) { + config := NewConfig() + fsys := fs.MapFS{ + "supabase/config.toml": &fs.MapFile{Data: []byte(` +[auth] +enabled = true +site_url = "http://127.0.0.1:3000" +[auth.passkey] +enabled = true +`)}, + } + // Run test + err := config.Load("", fsys) + // Check result + assert.ErrorContains(t, err, "Missing required config section: auth.webauthn") + }) + t.Run("passkey enabled requires rp_id", func(t 
*testing.T) { config := NewConfig() fsys := fs.MapFS{ @@ -111,13 +153,14 @@ enabled = true site_url = "http://127.0.0.1:3000" [auth.passkey] enabled = true +[auth.webauthn] rp_origins = ["http://127.0.0.1:3000"] `)}, } // Run test err := config.Load("", fsys) // Check result - assert.ErrorContains(t, err, "Missing required field in config: auth.passkey.rp_id") + assert.ErrorContains(t, err, "Missing required field in config: auth.webauthn.rp_id") }) t.Run("passkey enabled requires rp_origins", func(t *testing.T) { @@ -129,13 +172,14 @@ enabled = true site_url = "http://127.0.0.1:3000" [auth.passkey] enabled = true +[auth.webauthn] rp_id = "localhost" `)}, } // Run test err := config.Load("", fsys) // Check result - assert.ErrorContains(t, err, "Missing required field in config: auth.passkey.rp_origins") + assert.ErrorContains(t, err, "Missing required field in config: auth.webauthn.rp_origins") }) t.Run("parses experimental pgdelta config", func(t *testing.T) { diff --git a/pkg/config/templates/config.toml b/pkg/config/templates/config.toml index 2909f8223..97ed4e566 100644 --- a/pkg/config/templates/config.toml +++ b/pkg/config/templates/config.toml @@ -180,6 +180,9 @@ password_requirements = "" # Configure passkey sign-ins. # [auth.passkey] # enabled = false + +# Configure WebAuthn relying party settings (required when passkey is enabled). 
+# [auth.webauthn] # rp_display_name = "Supabase" # rp_id = "localhost" # rp_origins = ["http://127.0.0.1:3000"] From 3be2887eb4cd6ab4cddbbe3f6bcadce78549d233 Mon Sep 17 00:00:00 2001 From: Vaibhav <117663341+7ttp@users.noreply.github.com> Date: Tue, 14 Apr 2026 22:26:58 +0530 Subject: [PATCH 03/21] fix: atomic parser (#5064) * fix * test --------- Co-authored-by: Andrew Valleteau --- pkg/parser/state.go | 30 ++++++++++++++++++++++++++++-- pkg/parser/state_test.go | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/pkg/parser/state.go b/pkg/parser/state.go index f32a67131..47775390d 100644 --- a/pkg/parser/state.go +++ b/pkg/parser/state.go @@ -46,14 +46,40 @@ func (s *ReadyState) Next(r rune, data []byte) State { case 'c': fallthrough case 'C': - offset := len(data) - len(BEGIN_ATOMIC) - if offset >= 0 && strings.EqualFold(string(data[offset:]), BEGIN_ATOMIC) { + if isBeginAtomic(data) { return &AtomicState{prev: s, delimiter: []byte(END_ATOMIC)} } } return s } +func isBeginAtomic(data []byte) bool { + offset := len(data) - len(BEGIN_ATOMIC) + if offset < 0 || !strings.EqualFold(string(data[offset:]), BEGIN_ATOMIC) { + return false + } + if offset > 0 { + r, _ := utf8.DecodeLastRune(data[:offset]) + if isIdentifierRune(r) { + return false + } + } + prefix := bytes.TrimRightFunc(data[:offset], unicode.IsSpace) + offset = len(prefix) - len("BEGIN") + if offset < 0 || !strings.EqualFold(string(prefix[offset:]), "BEGIN") { + return false + } + if offset == 0 { + return true + } + r, _ := utf8.DecodeLastRune(prefix[:offset]) + return !isIdentifierRune(r) +} + +func isIdentifierRune(r rune) bool { + return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' || r == '$' +} + // Opened a line comment type CommentState struct{} diff --git a/pkg/parser/state_test.go b/pkg/parser/state_test.go index bae10fe19..ad6db9d26 100644 --- a/pkg/parser/state_test.go +++ b/pkg/parser/state_test.go @@ -167,4 +167,44 @@ END ;`} 
checkSplit(t, sql) }) + + t.Run("ignores atomic in identifiers", func(t *testing.T) { + names := []string{ + "fn_atomic", + "atomic_fn", + "my_atomic_thing", + "xatomicx", + "fn_ATomiC", + } + for _, name := range names { + t.Run(name, func(t *testing.T) { + sql := []string{ + `CREATE OR REPLACE FUNCTION ` + name + `() +RETURNS void LANGUAGE plpgsql AS $$ +BEGIN + NULL; +END; +$$;`, + ` +SELECT 1;`, + } + checkSplit(t, sql) + }) + } + }) + + t.Run("does not treat schema-qualified atomic function names as begin atomic", func(t *testing.T) { + sql := []string{`CREATE OR REPLACE FUNCTION public.atomic_example() +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +BEGIN + RETURN 1; +END; +$$;`, + ` +GRANT EXECUTE ON FUNCTION public.atomic_example() TO authenticated;`, + } + checkSplit(t, sql) + }) } From 5be6e902234239f23889a8bcb9fda5e8d08dd8c9 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Wed, 15 Apr 2026 17:13:56 +0200 Subject: [PATCH 04/21] fix(pg-delta): declarative apply error results (#5082) * fix(pg-delta): declarative apply error results Improve readability report for decalrative appy errors wrapping * chore: upgrade pg-delta to alpha 13 --- internal/db/diff/templates/pgdelta.ts | 4 +- .../diff/templates/pgdelta_catalog_export.ts | 2 +- .../templates/pgdelta_declarative_export.ts | 14 +- internal/db/pgcache/cache.go | 2 +- internal/pgdelta/apply.go | 149 ++++++++++++++++-- internal/pgdelta/apply_test.go | 95 +++++++++++ .../templates/pgdelta_declarative_apply.ts | 2 +- 7 files changed, 239 insertions(+), 29 deletions(-) create mode 100644 internal/pgdelta/apply_test.go diff --git a/internal/db/diff/templates/pgdelta.ts b/internal/db/diff/templates/pgdelta.ts index 234c91ab0..bb34eb973 100644 --- a/internal/db/diff/templates/pgdelta.ts +++ b/internal/db/diff/templates/pgdelta.ts @@ -2,8 +2,8 @@ import { createPlan, deserializeCatalog, formatSqlStatements, -} from "npm:@supabase/pg-delta@1.0.0-alpha.11"; -import { supabase } from 
"npm:@supabase/pg-delta@1.0.0-alpha.11/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.13/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { diff --git a/internal/db/diff/templates/pgdelta_catalog_export.ts b/internal/db/diff/templates/pgdelta_catalog_export.ts index cdadf00f8..e4de7c7b7 100644 --- a/internal/db/diff/templates/pgdelta_catalog_export.ts +++ b/internal/db/diff/templates/pgdelta_catalog_export.ts @@ -5,7 +5,7 @@ import { extractCatalog, serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.11"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? undefined; diff --git a/internal/db/diff/templates/pgdelta_declarative_export.ts b/internal/db/diff/templates/pgdelta_declarative_export.ts index dead372a7..c25f32a9a 100644 --- a/internal/db/diff/templates/pgdelta_declarative_export.ts +++ b/internal/db/diff/templates/pgdelta_declarative_export.ts @@ -5,8 +5,8 @@ import { createPlan, deserializeCatalog, exportDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.11"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.11/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.13/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { @@ -21,14 +21,6 @@ async function resolveInput(ref: string | undefined) { const source = Deno.env.get("SOURCE"); const target = Deno.env.get("TARGET"); -supabase.filter = { - // Also allow dropped extensions from migrations to be captured in the declarative schema export - // TODO: fix upstream bug into pgdelta supabase integration - or: [ - ...supabase.filter!.or!, - { objectType: "extension", operation: "drop", scope: "object" }, - ], -}; const includedSchemas = 
Deno.env.get("INCLUDED_SCHEMAS"); if (includedSchemas) { @@ -46,7 +38,6 @@ let formatOptions = undefined; if (formatOptionsRaw) { formatOptions = JSON.parse(formatOptionsRaw); } - try { const result = await createPlan( await resolveInput(source), @@ -66,6 +57,7 @@ try { ); } else { const output = exportDeclarativeSchema(result, { + integration: supabase, formatOptions, }); console.log( diff --git a/internal/db/pgcache/cache.go b/internal/db/pgcache/cache.go index d67552e10..b6831cb9b 100644 --- a/internal/db/pgcache/cache.go +++ b/internal/db/pgcache/cache.go @@ -34,7 +34,7 @@ import { extractCatalog, serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.11"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? undefined; if (!target) { diff --git a/internal/pgdelta/apply.go b/internal/pgdelta/apply.go index db8453a90..3683d6269 100644 --- a/internal/pgdelta/apply.go +++ b/internal/pgdelta/apply.go @@ -8,10 +8,12 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/go-errors/errors" "github.com/jackc/pgconn" "github.com/spf13/afero" + "github.com/spf13/viper" "github.com/supabase/cli/internal/utils" ) @@ -22,13 +24,129 @@ var pgDeltaDeclarativeApplyScript string // // The fields are surfaced to provide concise CLI feedback after apply runs. 
type ApplyResult struct { - Status string `json:"status"` - TotalStatements int `json:"totalStatements"` - TotalRounds int `json:"totalRounds"` - TotalApplied int `json:"totalApplied"` - TotalSkipped int `json:"totalSkipped"` - Errors []string `json:"errors"` - StuckStatements []string `json:"stuckStatements"` + Status string `json:"status"` + TotalStatements int `json:"totalStatements"` + TotalRounds int `json:"totalRounds"` + TotalApplied int `json:"totalApplied"` + TotalSkipped int `json:"totalSkipped"` + Errors []ApplyIssue `json:"errors"` + StuckStatements []ApplyIssue `json:"stuckStatements"` +} + +// ApplyIssue models a pg-delta apply error or stuck statement. +// +// pg-delta may emit either a plain string or a structured object, so unmarshal +// needs to gracefully handle both forms. +type ApplyIssue struct { + Statement *ApplyStatement `json:"statement,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + IsDependencyError bool `json:"isDependencyError,omitempty"` +} + +type ApplyStatement struct { + ID string `json:"id"` + SQL string `json:"sql"` + StatementClass string `json:"statementClass"` +} + +func (i *ApplyIssue) UnmarshalJSON(data []byte) error { + trimmed := bytes.TrimSpace(data) + if bytes.Equal(trimmed, []byte("null")) { + *i = ApplyIssue{} + return nil + } + var message string + if err := json.Unmarshal(trimmed, &message); err == nil { + *i = ApplyIssue{Message: message} + return nil + } + type alias ApplyIssue + var parsed alias + if err := json.Unmarshal(trimmed, &parsed); err != nil { + return err + } + *i = ApplyIssue(parsed) + return nil +} + +func formatApplyFailure(result ApplyResult) string { + totalStatements := result.TotalStatements + if totalStatements == 0 { + totalStatements = result.TotalApplied + result.TotalSkipped + len(result.StuckStatements) + } + lines := []string{ + fmt.Sprintf("pg-delta apply returned status %q.", result.Status), + fmt.Sprintf("%d/%d statements applied in %d 
round(s); %d skipped.", result.TotalApplied, totalStatements, result.TotalRounds, result.TotalSkipped), + } + if len(result.Errors) > 0 { + lines = append(lines, "Errors:") + for _, issue := range result.Errors { + lines = append(lines, formatApplyIssue(issue)) + } + } + if len(result.StuckStatements) > 0 { + lines = append(lines, "Stuck statements:") + for _, issue := range result.StuckStatements { + lines = append(lines, formatApplyIssue(issue)) + } + } + return strings.Join(lines, "\n") +} + +func formatApplyIssue(issue ApplyIssue) string { + if issue.Statement == nil { + return "- " + formatApplyIssueMessage(issue) + } + title := "- " + issue.Statement.ID + if issue.Statement.StatementClass != "" { + title += " [" + issue.Statement.StatementClass + "]" + } + lines := []string{title} + lines = append(lines, " "+formatApplyIssueMessage(issue)) + if sql := formatStatementSQL(issue.Statement.SQL); sql != "" { + lines = append(lines, " SQL: "+sql) + } + return strings.Join(lines, "\n") +} + +func formatApplyIssueMessage(issue ApplyIssue) string { + message := strings.TrimSpace(issue.Message) + if message == "" { + message = "unknown pg-delta issue" + } + var metadata []string + if issue.Code != "" { + metadata = append(metadata, "SQLSTATE "+issue.Code) + } + if issue.IsDependencyError { + metadata = append(metadata, "dependency error") + } + if len(metadata) == 0 { + return message + } + return fmt.Sprintf("%s (%s)", message, strings.Join(metadata, ", ")) +} + +func formatStatementSQL(sql string) string { + normalized := strings.Join(strings.Fields(sql), " ") + const maxLen = 120 + if len(normalized) <= maxLen { + return normalized + } + return normalized[:maxLen-3] + "..." 
+} + +func formatDebugJSON(raw []byte) string { + trimmed := bytes.TrimSpace(raw) + if len(trimmed) == 0 { + return "" + } + var indented bytes.Buffer + if err := json.Indent(&indented, trimmed, "", " "); err == nil { + return indented.String() + } + return string(trimmed) } // ApplyDeclarative applies files from supabase/declarative to the target @@ -64,14 +182,19 @@ func ApplyDeclarative(ctx context.Context, config pgconn.Config, fsys afero.Fs) var result ApplyResult if err := json.Unmarshal(stdout.Bytes(), &result); err != nil { - return errors.Errorf("failed to parse pg-delta apply output: %w\nstdout: %s", err, stdout.String()) + if viper.GetBool("DEBUG") { + return errors.Errorf("failed to parse pg-delta apply output: %w\nstdout: %s", err, stdout.String()) + } + return errors.Errorf("failed to parse pg-delta apply output: %w", err) } if result.Status != "success" { - if len(result.Errors) > 0 { - fmt.Fprintf(os.Stderr, "Errors: %v\n", result.Errors) - } - if len(result.StuckStatements) > 0 { - fmt.Fprintf(os.Stderr, "Stuck statements: %v\n", result.StuckStatements) + if viper.GetBool("DEBUG") { + if debugJSON := formatDebugJSON(stdout.Bytes()); len(debugJSON) > 0 { + fmt.Fprintln(os.Stderr, "pg-delta apply result:") + fmt.Fprintln(os.Stderr, debugJSON) + } + } else { + fmt.Fprintln(os.Stderr, formatApplyFailure(result)) } return errors.Errorf("pg-delta declarative apply failed with status: %s", result.Status) } diff --git a/internal/pgdelta/apply_test.go b/internal/pgdelta/apply_test.go new file mode 100644 index 000000000..af9cfb001 --- /dev/null +++ b/internal/pgdelta/apply_test.go @@ -0,0 +1,95 @@ +package pgdelta + +import ( + "encoding/json" + "strings" + "testing" +) + +func TestApplyResultUnmarshalStructuredStuckStatements(t *testing.T) { + raw := []byte(`{ + "status": "stuck", + "totalStatements": 34, + "totalRounds": 2, + "totalApplied": 29, + "totalSkipped": 0, + "errors": [], + "stuckStatements": [ + { + "statement": { + "id": 
"cluster/extensions/pgmq.sql:0", + "sql": "CREATE EXTENSION pgmq WITH SCHEMA pgmq;", + "statementClass": "CREATE_EXTENSION" + }, + "code": "3F000", + "message": "schema \"pgmq\" does not exist", + "isDependencyError": true + } + ] + }`) + + var result ApplyResult + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("json.Unmarshal() error = %v", err) + } + + if got, want := len(result.StuckStatements), 1; got != want { + t.Fatalf("len(StuckStatements) = %d, want %d", got, want) + } + + stuck := result.StuckStatements[0] + if stuck.Statement == nil { + t.Fatal("expected structured statement details") + } + if got, want := stuck.Statement.ID, "cluster/extensions/pgmq.sql:0"; got != want { + t.Fatalf("Statement.ID = %q, want %q", got, want) + } + if got, want := stuck.Statement.StatementClass, "CREATE_EXTENSION"; got != want { + t.Fatalf("Statement.StatementClass = %q, want %q", got, want) + } + if got, want := stuck.Code, "3F000"; got != want { + t.Fatalf("Code = %q, want %q", got, want) + } + if got, want := stuck.Message, `schema "pgmq" does not exist`; got != want { + t.Fatalf("Message = %q, want %q", got, want) + } + if !stuck.IsDependencyError { + t.Fatal("expected dependency error to be preserved") + } +} + +func TestFormatApplyFailure(t *testing.T) { + result := ApplyResult{ + Status: "stuck", + TotalStatements: 34, + TotalRounds: 2, + TotalApplied: 29, + TotalSkipped: 0, + StuckStatements: []ApplyIssue{ + { + Statement: &ApplyStatement{ + ID: "cluster/extensions/pgmq.sql:0", + SQL: "CREATE EXTENSION pgmq WITH SCHEMA pgmq;", + StatementClass: "CREATE_EXTENSION", + }, + Code: "3F000", + Message: `schema "pgmq" does not exist`, + IsDependencyError: true, + }, + }, + } + + formatted := formatApplyFailure(result) + assertContains(t, formatted, `pg-delta apply returned status "stuck"`) + assertContains(t, formatted, `29/34 statements applied in 2 round(s)`) + assertContains(t, formatted, `cluster/extensions/pgmq.sql:0 [CREATE_EXTENSION]`) + 
assertContains(t, formatted, `schema "pgmq" does not exist (SQLSTATE 3F000, dependency error)`) + assertContains(t, formatted, `SQL: CREATE EXTENSION pgmq WITH SCHEMA pgmq;`) +} + +func assertContains(t *testing.T, text, want string) { + t.Helper() + if !strings.Contains(text, want) { + t.Fatalf("expected %q to contain %q", text, want) + } +} diff --git a/internal/pgdelta/templates/pgdelta_declarative_apply.ts b/internal/pgdelta/templates/pgdelta_declarative_apply.ts index 1c43421b8..b6747e150 100644 --- a/internal/pgdelta/templates/pgdelta_declarative_apply.ts +++ b/internal/pgdelta/templates/pgdelta_declarative_apply.ts @@ -3,7 +3,7 @@ import { applyDeclarativeSchema, loadDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.11/declarative"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.13/declarative"; const schemaPath = Deno.env.get("SCHEMA_PATH"); const target = Deno.env.get("TARGET"); From fa4926269c2fa803b1651a69ad1e06d0d2886335 Mon Sep 17 00:00:00 2001 From: Sean Oliver <882952+seanoliver@users.noreply.github.com> Date: Wed, 15 Apr 2026 14:20:06 -0700 Subject: [PATCH 05/21] feat(telemetry): attach org/project groups to all CLI events Only ~19% of CLI events had PostHog group properties ($group_0, $group_1) because groups were only set during `supabase link`. Commands using --project-ref without linking sent events invisible to group analytics. Add EnsureProjectGroupsCached which resolves and caches project metadata (including org ID) in linked-project.json when a project ref is available. The cache is checked before every cli_command_executed event, so the API call only happens once per unique project ref. 
Closes GROWTH-761 --- cmd/root.go | 1 + internal/telemetry/project.go | 29 ++++++ internal/telemetry/project_test.go | 152 +++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+) create mode 100644 internal/telemetry/project_test.go diff --git a/cmd/root.go b/cmd/root.go index ae2966ab9..6faaa11ac 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -173,6 +173,7 @@ func Execute() { executedCmd, err := rootCmd.ExecuteC() if executedCmd != nil { if service := telemetry.FromContext(executedCmd.Context()); service != nil { + telemetry.EnsureProjectGroupsCached(executedCmd.Context(), flags.ProjectRef, afero.NewOsFs()) _ = service.Capture(executedCmd.Context(), telemetry.EventCommandExecuted, map[string]any{ telemetry.PropExitCode: exitCode(err), telemetry.PropDurationMs: time.Since(startedAt).Milliseconds(), diff --git a/internal/telemetry/project.go b/internal/telemetry/project.go index 63ec40b35..84fb03373 100644 --- a/internal/telemetry/project.go +++ b/internal/telemetry/project.go @@ -1,7 +1,9 @@ package telemetry import ( + "context" "encoding/json" + "fmt" "os" "path/filepath" @@ -48,6 +50,33 @@ func LoadLinkedProject(fsys afero.Fs) (LinkedProject, error) { return linked, nil } +// EnsureProjectGroupsCached fetches project metadata from the API and caches it +// in linked-project.json when a project ref is available but no matching cache +// exists. This ensures linkedProjectGroups returns org/project groups for all +// events, not just those fired after `supabase link`. +// +// Best-effort: silently returns on any error so telemetry never breaks commands. +func EnsureProjectGroupsCached(ctx context.Context, projectRef string, fsys afero.Fs) { + if projectRef == "" { + return + } + // Already cached and matches current ref? Nothing to do. 
+ if existing, err := LoadLinkedProject(fsys); err == nil && existing.Ref == projectRef { + return + } + resp, err := utils.GetSupabase().V1GetProjectWithResponse(ctx, projectRef) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + return + } + if resp.JSON200 == nil { + return + } + if err := SaveLinkedProject(*resp.JSON200, fsys); err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } +} + func linkedProjectGroups(fsys afero.Fs) map[string]string { linked, err := LoadLinkedProject(fsys) if err != nil { diff --git a/internal/telemetry/project_test.go b/internal/telemetry/project_test.go new file mode 100644 index 000000000..0e122a1f0 --- /dev/null +++ b/internal/telemetry/project_test.go @@ -0,0 +1,152 @@ +package telemetry + +import ( + "context" + "net/http" + "testing" + + "github.com/h2non/gock" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/testing/apitest" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/api" +) + +func TestEnsureProjectGroupsCached(t *testing.T) { + t.Setenv("SUPABASE_HOME", "/tmp/supabase-home") + + projectJSON := map[string]interface{}{ + "ref": "proj_abc", + "organization_id": "org_123", + "organization_slug": "acme", + "name": "My Project", + "region": "us-east-1", + "created_at": "2024-01-01T00:00:00Z", + "status": "ACTIVE_HEALTHY", + "database": map[string]interface{}{"host": "db.example.supabase.co", "version": "15.1.0.117"}, + } + + t.Run("skips when project ref is empty", func(t *testing.T) { + fsys := afero.NewMemMapFs() + EnsureProjectGroupsCached(context.Background(), "", fsys) + _, err := LoadLinkedProject(fsys) + assert.Error(t, err) + }) + + t.Run("skips when cache already matches", func(t *testing.T) { + fsys := afero.NewMemMapFs() + require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ + Ref: "proj_abc", + Name: "My Project", + OrganizationId: "org_123", + 
OrganizationSlug: "acme", + }, fsys)) + // No gock mocks — any API call would panic + EnsureProjectGroupsCached(context.Background(), "proj_abc", fsys) + linked, err := LoadLinkedProject(fsys) + require.NoError(t, err) + assert.Equal(t, "org_123", linked.OrganizationID) + }) + + t.Run("fetches and caches when no cache exists", func(t *testing.T) { + t.Cleanup(apitest.MockPlatformAPI(t)) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/proj_abc"). + Reply(http.StatusOK). + JSON(projectJSON) + + fsys := afero.NewMemMapFs() + EnsureProjectGroupsCached(context.Background(), "proj_abc", fsys) + + linked, err := LoadLinkedProject(fsys) + require.NoError(t, err) + assert.Equal(t, "proj_abc", linked.Ref) + assert.Equal(t, "org_123", linked.OrganizationID) + assert.Equal(t, "acme", linked.OrganizationSlug) + }) + + t.Run("updates cache when ref differs", func(t *testing.T) { + t.Cleanup(apitest.MockPlatformAPI(t)) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/proj_xyz"). + Reply(http.StatusOK). + JSON(map[string]interface{}{ + "ref": "proj_xyz", + "organization_id": "org_456", + "organization_slug": "other", + "name": "Other Project", + "region": "eu-west-1", + "created_at": "2024-06-01T00:00:00Z", + "status": "ACTIVE_HEALTHY", + "database": map[string]interface{}{"host": "db.other.supabase.co", "version": "15.1.0.117"}, + }) + + fsys := afero.NewMemMapFs() + require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ + Ref: "proj_abc", + Name: "My Project", + OrganizationId: "org_123", + OrganizationSlug: "acme", + }, fsys)) + + EnsureProjectGroupsCached(context.Background(), "proj_xyz", fsys) + + linked, err := LoadLinkedProject(fsys) + require.NoError(t, err) + assert.Equal(t, "proj_xyz", linked.Ref) + assert.Equal(t, "org_456", linked.OrganizationID) + }) + + t.Run("no-ops on API error", func(t *testing.T) { + t.Cleanup(apitest.MockPlatformAPI(t)) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/proj_bad"). 
+ ReplyError(assert.AnError) + + fsys := afero.NewMemMapFs() + EnsureProjectGroupsCached(context.Background(), "proj_bad", fsys) + + _, err := LoadLinkedProject(fsys) + assert.Error(t, err) // no cache written + }) + + t.Run("no-ops on 404", func(t *testing.T) { + t.Cleanup(apitest.MockPlatformAPI(t)) + gock.New(utils.DefaultApiHost). + Get("/v1/projects/proj_missing"). + Reply(http.StatusNotFound) + + fsys := afero.NewMemMapFs() + EnsureProjectGroupsCached(context.Background(), "proj_missing", fsys) + + _, err := LoadLinkedProject(fsys) + assert.Error(t, err) // no cache written + }) +} + +func TestLinkedProjectGroups(t *testing.T) { + t.Setenv("SUPABASE_HOME", "/tmp/supabase-home") + + t.Run("returns nil when no cache", func(t *testing.T) { + fsys := afero.NewMemMapFs() + groups := linkedProjectGroups(fsys) + assert.Nil(t, groups) + }) + + t.Run("returns groups from cache", func(t *testing.T) { + fsys := afero.NewMemMapFs() + require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ + Ref: "proj_abc", + Name: "My Project", + OrganizationId: "org_123", + OrganizationSlug: "acme", + }, fsys)) + groups := linkedProjectGroups(fsys) + assert.Equal(t, map[string]string{ + GroupOrganization: "org_123", + GroupProject: "proj_abc", + }, groups) + }) +} From 8e108f0cf608a3e144ba56f7fe7d8c0725d8db64 Mon Sep 17 00:00:00 2001 From: Sean Oliver <882952+seanoliver@users.noreply.github.com> Date: Wed, 15 Apr 2026 14:36:16 -0700 Subject: [PATCH 06/21] fix: address code review feedback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Guard against log.Fatalln crash: check auth token before calling GetSupabase(), and move the API call to cmd/root.go where it belongs - Don't overwrite existing linked-project.json cache — supabase link is the authoritative source, we only fill the gap when no cache exists - Fire GroupIdentify for org and project after caching, matching the link flow so PostHog has group metadata - Restructure 
so telemetry package has no API dependencies (pure caching + PostHog calls), making tests reliable without gock/mocks --- cmd/root.go | 31 +++++- internal/telemetry/project.go | 50 +++++---- internal/telemetry/project_test.go | 158 ++++++++++++----------------- 3 files changed, 126 insertions(+), 113 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 6faaa11ac..f8468a3e5 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -173,7 +173,7 @@ func Execute() { executedCmd, err := rootCmd.ExecuteC() if executedCmd != nil { if service := telemetry.FromContext(executedCmd.Context()); service != nil { - telemetry.EnsureProjectGroupsCached(executedCmd.Context(), flags.ProjectRef, afero.NewOsFs()) + ensureProjectGroupsCached(executedCmd.Context(), service) _ = service.Capture(executedCmd.Context(), telemetry.EventCommandExecuted, map[string]any{ telemetry.PropExitCode: exitCode(err), telemetry.PropDurationMs: time.Since(startedAt).Milliseconds(), @@ -201,6 +201,35 @@ func Execute() { } } +// ensureProjectGroupsCached populates the telemetry linked-project cache when +// a project ref is available but no cache exists. This ensures org/project +// PostHog groups are attached to all CLI events, not just those after `supabase link`. +// +// Does not overwrite an existing cache — `supabase link` is the authoritative source. +// Checks auth before calling the API to avoid the log.Fatalln in GetSupabase(). 
+func ensureProjectGroupsCached(ctx context.Context, service *telemetry.Service) { + ref := flags.ProjectRef + if ref == "" { + return + } + fsys := afero.NewOsFs() + if telemetry.HasLinkedProject(fsys) { + return + } + if _, err := utils.LoadAccessTokenFS(fsys); err != nil { + return + } + resp, err := utils.GetSupabase().V1GetProjectWithResponse(ctx, ref) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + return + } + if resp.JSON200 == nil { + return + } + telemetry.CacheProjectAndIdentifyGroups(*resp.JSON200, service, fsys) +} + func exitCode(err error) int { if err != nil { return 1 diff --git a/internal/telemetry/project.go b/internal/telemetry/project.go index 84fb03373..e85e72c1f 100644 --- a/internal/telemetry/project.go +++ b/internal/telemetry/project.go @@ -1,7 +1,6 @@ package telemetry import ( - "context" "encoding/json" "fmt" "os" @@ -50,30 +49,41 @@ func LoadLinkedProject(fsys afero.Fs) (LinkedProject, error) { return linked, nil } -// EnsureProjectGroupsCached fetches project metadata from the API and caches it -// in linked-project.json when a project ref is available but no matching cache -// exists. This ensures linkedProjectGroups returns org/project groups for all -// events, not just those fired after `supabase link`. +// HasLinkedProject reports whether a cached linked-project.json exists. +func HasLinkedProject(fsys afero.Fs) bool { + _, err := LoadLinkedProject(fsys) + return err == nil +} + +// CacheProjectAndIdentifyGroups writes project metadata to linked-project.json +// and fires GroupIdentify for the org and project so PostHog has group metadata. +// This matches the behavior of the `supabase link` flow. // -// Best-effort: silently returns on any error so telemetry never breaks commands. -func EnsureProjectGroupsCached(ctx context.Context, projectRef string, fsys afero.Fs) { - if projectRef == "" { - return - } - // Already cached and matches current ref? Nothing to do. 
- if existing, err := LoadLinkedProject(fsys); err == nil && existing.Ref == projectRef { - return - } - resp, err := utils.GetSupabase().V1GetProjectWithResponse(ctx, projectRef) - if err != nil { +// The caller is responsible for fetching the project from the API and checking +// auth — this function only handles caching and PostHog group identification. +// +// Best-effort: logs errors to debug output, never returns them. +func CacheProjectAndIdentifyGroups(project api.V1ProjectWithDatabaseResponse, service *Service, fsys afero.Fs) { + if err := SaveLinkedProject(project, fsys); err != nil { fmt.Fprintln(utils.GetDebugLogger(), err) - return } - if resp.JSON200 == nil { + if service == nil { return } - if err := SaveLinkedProject(*resp.JSON200, fsys); err != nil { - fmt.Fprintln(utils.GetDebugLogger(), err) + if project.OrganizationId != "" { + if err := service.GroupIdentify(GroupOrganization, project.OrganizationId, map[string]any{ + "organization_slug": project.OrganizationSlug, + }); err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + } + if project.Ref != "" { + if err := service.GroupIdentify(GroupProject, project.Ref, map[string]any{ + "name": project.Name, + "organization_slug": project.OrganizationSlug, + }); err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } } } diff --git a/internal/telemetry/project_test.go b/internal/telemetry/project_test.go index 0e122a1f0..faefb8774 100644 --- a/internal/telemetry/project_test.go +++ b/internal/telemetry/project_test.go @@ -1,128 +1,107 @@ package telemetry import ( - "context" - "net/http" "testing" + "time" - "github.com/h2non/gock" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/supabase/cli/internal/testing/apitest" - "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/pkg/api" ) -func TestEnsureProjectGroupsCached(t *testing.T) { +var testProject = api.V1ProjectWithDatabaseResponse{ + Ref: "proj_abc", + Name: 
"My Project", + OrganizationId: "org_123", + OrganizationSlug: "acme", +} + +func newTestService(t *testing.T, fsys afero.Fs, analytics *fakeAnalytics) *Service { + t.Helper() + service, err := NewService(fsys, Options{ + Analytics: analytics, + Now: func() time.Time { return time.Date(2026, time.April, 15, 12, 0, 0, 0, time.UTC) }, + }) + require.NoError(t, err) + return service +} + +func TestHasLinkedProject(t *testing.T) { t.Setenv("SUPABASE_HOME", "/tmp/supabase-home") - projectJSON := map[string]interface{}{ - "ref": "proj_abc", - "organization_id": "org_123", - "organization_slug": "acme", - "name": "My Project", - "region": "us-east-1", - "created_at": "2024-01-01T00:00:00Z", - "status": "ACTIVE_HEALTHY", - "database": map[string]interface{}{"host": "db.example.supabase.co", "version": "15.1.0.117"}, - } - - t.Run("skips when project ref is empty", func(t *testing.T) { + t.Run("false when no cache", func(t *testing.T) { fsys := afero.NewMemMapFs() - EnsureProjectGroupsCached(context.Background(), "", fsys) - _, err := LoadLinkedProject(fsys) - assert.Error(t, err) + assert.False(t, HasLinkedProject(fsys)) }) - t.Run("skips when cache already matches", func(t *testing.T) { + t.Run("true when cache exists", func(t *testing.T) { fsys := afero.NewMemMapFs() - require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ - Ref: "proj_abc", - Name: "My Project", - OrganizationId: "org_123", - OrganizationSlug: "acme", - }, fsys)) - // No gock mocks — any API call would panic - EnsureProjectGroupsCached(context.Background(), "proj_abc", fsys) - linked, err := LoadLinkedProject(fsys) - require.NoError(t, err) - assert.Equal(t, "org_123", linked.OrganizationID) + require.NoError(t, SaveLinkedProject(testProject, fsys)) + assert.True(t, HasLinkedProject(fsys)) }) +} - t.Run("fetches and caches when no cache exists", func(t *testing.T) { - t.Cleanup(apitest.MockPlatformAPI(t)) - gock.New(utils.DefaultApiHost). - Get("/v1/projects/proj_abc"). 
- Reply(http.StatusOK). - JSON(projectJSON) +func TestCacheProjectAndIdentifyGroups(t *testing.T) { + t.Setenv("SUPABASE_HOME", "/tmp/supabase-home") + t.Run("writes cache file", func(t *testing.T) { fsys := afero.NewMemMapFs() - EnsureProjectGroupsCached(context.Background(), "proj_abc", fsys) + CacheProjectAndIdentifyGroups(testProject, nil, fsys) linked, err := LoadLinkedProject(fsys) require.NoError(t, err) assert.Equal(t, "proj_abc", linked.Ref) assert.Equal(t, "org_123", linked.OrganizationID) assert.Equal(t, "acme", linked.OrganizationSlug) + assert.Equal(t, "My Project", linked.Name) }) - t.Run("updates cache when ref differs", func(t *testing.T) { - t.Cleanup(apitest.MockPlatformAPI(t)) - gock.New(utils.DefaultApiHost). - Get("/v1/projects/proj_xyz"). - Reply(http.StatusOK). - JSON(map[string]interface{}{ - "ref": "proj_xyz", - "organization_id": "org_456", - "organization_slug": "other", - "name": "Other Project", - "region": "eu-west-1", - "created_at": "2024-06-01T00:00:00Z", - "status": "ACTIVE_HEALTHY", - "database": map[string]interface{}{"host": "db.other.supabase.co", "version": "15.1.0.117"}, - }) - + t.Run("fires GroupIdentify for org and project", func(t *testing.T) { fsys := afero.NewMemMapFs() - require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ - Ref: "proj_abc", - Name: "My Project", - OrganizationId: "org_123", - OrganizationSlug: "acme", - }, fsys)) + analytics := &fakeAnalytics{enabled: true} + service := newTestService(t, fsys, analytics) - EnsureProjectGroupsCached(context.Background(), "proj_xyz", fsys) + CacheProjectAndIdentifyGroups(testProject, service, fsys) - linked, err := LoadLinkedProject(fsys) - require.NoError(t, err) - assert.Equal(t, "proj_xyz", linked.Ref) - assert.Equal(t, "org_456", linked.OrganizationID) - }) + require.Len(t, analytics.groupIdentifies, 2) - t.Run("no-ops on API error", func(t *testing.T) { - t.Cleanup(apitest.MockPlatformAPI(t)) - gock.New(utils.DefaultApiHost). 
- Get("/v1/projects/proj_bad"). - ReplyError(assert.AnError) + orgCall := analytics.groupIdentifies[0] + assert.Equal(t, GroupOrganization, orgCall.groupType) + assert.Equal(t, "org_123", orgCall.groupKey) + assert.Equal(t, "acme", orgCall.properties["organization_slug"]) + + projCall := analytics.groupIdentifies[1] + assert.Equal(t, GroupProject, projCall.groupType) + assert.Equal(t, "proj_abc", projCall.groupKey) + assert.Equal(t, "My Project", projCall.properties["name"]) + assert.Equal(t, "acme", projCall.properties["organization_slug"]) + }) + t.Run("skips GroupIdentify when service is nil", func(t *testing.T) { fsys := afero.NewMemMapFs() - EnsureProjectGroupsCached(context.Background(), "proj_bad", fsys) + CacheProjectAndIdentifyGroups(testProject, nil, fsys) - _, err := LoadLinkedProject(fsys) - assert.Error(t, err) // no cache written + // Cache should still be written + linked, err := LoadLinkedProject(fsys) + require.NoError(t, err) + assert.Equal(t, "proj_abc", linked.Ref) }) - t.Run("no-ops on 404", func(t *testing.T) { - t.Cleanup(apitest.MockPlatformAPI(t)) - gock.New(utils.DefaultApiHost). - Get("/v1/projects/proj_missing"). 
- Reply(http.StatusNotFound) - + t.Run("skips GroupIdentify for empty org ID", func(t *testing.T) { fsys := afero.NewMemMapFs() - EnsureProjectGroupsCached(context.Background(), "proj_missing", fsys) - - _, err := LoadLinkedProject(fsys) - assert.Error(t, err) // no cache written + analytics := &fakeAnalytics{enabled: true} + service := newTestService(t, fsys, analytics) + + noOrgProject := api.V1ProjectWithDatabaseResponse{ + Ref: "proj_abc", + Name: "My Project", + } + CacheProjectAndIdentifyGroups(noOrgProject, service, fsys) + + // Only project GroupIdentify, no org + require.Len(t, analytics.groupIdentifies, 1) + assert.Equal(t, GroupProject, analytics.groupIdentifies[0].groupType) }) } @@ -137,12 +116,7 @@ func TestLinkedProjectGroups(t *testing.T) { t.Run("returns groups from cache", func(t *testing.T) { fsys := afero.NewMemMapFs() - require.NoError(t, SaveLinkedProject(api.V1ProjectWithDatabaseResponse{ - Ref: "proj_abc", - Name: "My Project", - OrganizationId: "org_123", - OrganizationSlug: "acme", - }, fsys)) + require.NoError(t, SaveLinkedProject(testProject, fsys)) groups := linkedProjectGroups(fsys) assert.Equal(t, map[string]string{ GroupOrganization: "org_123", From ae7642d5089c9752b543681010f1ef4c434794be Mon Sep 17 00:00:00 2001 From: Han Qiao Date: Thu, 16 Apr 2026 16:07:15 +0800 Subject: [PATCH 07/21] fix: adds etl to managed schema (#5090) --- pkg/migration/dump.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/migration/dump.go b/pkg/migration/dump.go index 2f87cb319..0a1d6cdb3 100644 --- a/pkg/migration/dump.go +++ b/pkg/migration/dump.go @@ -26,6 +26,7 @@ var ( "_realtime", "_supavisor", "auth", + "etl", "extensions", "pgbouncer", "realtime", @@ -72,6 +73,7 @@ var ( "vault", // Managed by Supabase // "auth", + "etl", "extensions", "pgbouncer", "realtime", From f048751ce86a6a880e6ebc385c93194374aa297a Mon Sep 17 00:00:00 2001 From: "supabase-cli-releaser[bot]" <246109035+supabase-cli-releaser[bot]@users.noreply.github.com> Date: Fri, 
17 Apr 2026 05:56:17 +0200 Subject: [PATCH 08/21] chore: sync API types from infrastructure (#5093) Co-authored-by: supabase-cli-releaser[bot] <246109035+supabase-cli-releaser[bot]@users.noreply.github.com> --- pkg/api/types.gen.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/api/types.gen.go b/pkg/api/types.gen.go index eb77eefff..637dcad9b 100644 --- a/pkg/api/types.gen.go +++ b/pkg/api/types.gen.go @@ -492,9 +492,8 @@ const ( // Defines values for JitAccessRequestRequestState. const ( - Disabled JitAccessRequestRequestState = "disabled" - Enabled JitAccessRequestRequestState = "enabled" - Unavailable JitAccessRequestRequestState = "unavailable" + Disabled JitAccessRequestRequestState = "disabled" + Enabled JitAccessRequestRequestState = "enabled" ) // Defines values for ListActionRunResponseRunStepsName. @@ -1418,6 +1417,7 @@ const ( V1ListEntitlementsResponseEntitlementsFeatureKeyReplicationEtl V1ListEntitlementsResponseEntitlementsFeatureKey = "replication.etl" V1ListEntitlementsResponseEntitlementsFeatureKeySecurityAuditLogsDays V1ListEntitlementsResponseEntitlementsFeatureKey = "security.audit_logs_days" V1ListEntitlementsResponseEntitlementsFeatureKeySecurityEnforceMfa V1ListEntitlementsResponseEntitlementsFeatureKey = "security.enforce_mfa" + V1ListEntitlementsResponseEntitlementsFeatureKeySecurityIso27001Certificate V1ListEntitlementsResponseEntitlementsFeatureKey = "security.iso27001_certificate" V1ListEntitlementsResponseEntitlementsFeatureKeySecurityMemberRoles V1ListEntitlementsResponseEntitlementsFeatureKey = "security.member_roles" V1ListEntitlementsResponseEntitlementsFeatureKeySecurityPrivateLink V1ListEntitlementsResponseEntitlementsFeatureKey = "security.private_link" V1ListEntitlementsResponseEntitlementsFeatureKeySecurityQuestionnaire V1ListEntitlementsResponseEntitlementsFeatureKey = "security.questionnaire" From a03a8bfb055c914a684c934f3f47aadbb023db0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Apr 2026 04:08:38 +0000 Subject: [PATCH 09/21] chore(deps): bump the actions-major group across 1 directory with 5 updates (#5088) Bumps the actions-major group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [actions/create-github-app-token](https://github.com/actions/create-github-app-token) | `3.0.0` | `3.1.1` | | [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) | `8.1.0` | `8.1.1` | | [actions/upload-artifact](https://github.com/actions/upload-artifact) | `7.0.0` | `7.0.1` | | [github/codeql-action](https://github.com/github/codeql-action) | `4.35.1` | `4.35.2` | | [docker/build-push-action](https://github.com/docker/build-push-action) | `7.0.0` | `7.1.0` | Updates `actions/create-github-app-token` from 3.0.0 to 3.1.1 - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/f8d387b68d61c58ab83c6c016672934102569859...1b10c78c7865c340bc4f6099eb2f838309f1e8c3) Updates `peter-evans/create-pull-request` from 8.1.0 to 8.1.1 - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/c0f553fe549906ede9cf27b5156039d195d2ece0...5f6978faf089d4d20b00c7766989d076bb2fc7f1) Updates `actions/upload-artifact` from 7.0.0 to 7.0.1 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/bbbca2ddaa5d8feaa63e36b76fdaad77386f024f...043fb46d1a93c77aae656e7c1c64a875d1fc6a0a) Updates `github/codeql-action` from 4.35.1 to 4.35.2 - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - 
[Commits](https://github.com/github/codeql-action/compare/c10b8064de6f491fea524254123dbe5e09572f13...95e58e9a2cdfd71adc6e0353d5c52f41a045d225) Updates `docker/build-push-action` from 7.0.0 to 7.1.0 - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/d08e5c354a6adb9ed34480a06d141179aa583294...bcafcacb16a39f128d818304e6c9c0c18556b85f) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-version: 3.1.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major - dependency-name: peter-evans/create-pull-request dependency-version: 8.1.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: actions-major - dependency-name: actions/upload-artifact dependency-version: 7.0.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: actions-major - dependency-name: github/codeql-action dependency-version: 4.35.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: actions-major - dependency-name: docker/build-push-action dependency-version: 7.1.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Valleteau --- .github/workflows/api-sync.yml | 4 ++-- .github/workflows/automerge.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/deploy.yml | 2 +- .github/workflows/install.yml | 2 +- .github/workflows/pg-prove.yml | 4 ++-- .github/workflows/publish-migra.yml | 4 ++-- .github/workflows/release-beta.yml | 2 +- .github/workflows/release.yml | 8 ++++---- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/api-sync.yml b/.github/workflows/api-sync.yml index bf0b2568d..28fe129a5 100644 --- a/.github/workflows/api-sync.yml +++ b/.github/workflows/api-sync.yml @@ -39,7 +39,7 @@ jobs: - name: Generate token id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} @@ -47,7 +47,7 @@ jobs: - name: Create Pull Request if: steps.check.outputs.has_changes == 'true' id: cpr - uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0 + uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1 with: token: ${{ steps.app-token.outputs.token }} commit-message: "chore: sync API types from infrastructure" diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index 67609662d..364ea05d1 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -25,7 +25,7 @@ jobs: - name: Generate token id: app-token if: ${{ steps.meta.outputs.update-type == null || steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 
'version-update:semver-minor') }} - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d32a2c6e3..fbcea844b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,7 @@ jobs: go tool gotestsum -- -race -v -count=1 ./... \ -coverpkg="./cmd/...,./internal/...,${pkgs}" -coverprofile=coverage.out - - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: code-coverage-report path: coverage.out diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2421a2a7f..168bd0920 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -61,7 +61,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 + uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -89,6 +89,6 @@ jobs: exit 1 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 + uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 99fcfe76b..61ee1f603 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -18,7 +18,7 @@ jobs: with: fetch-depth: 0 - id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index 6ed2ab2fd..864e5cf24 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -30,7 +30,7 @@ jobs: mv tmp.$$.json package.json npm pack - - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: installer path: supabase-1.28.0.tgz diff --git a/.github/workflows/pg-prove.yml b/.github/workflows/pg-prove.yml index e9cbcd346..4d79abd60 100644 --- a/.github/workflows/pg-prove.yml +++ b/.github/workflows/pg-prove.yml @@ -13,7 +13,7 @@ jobs: image_tag: supabase/pg_prove:${{ steps.version.outputs.pg_prove }} steps: - uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - - uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 + - uses: 
docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 with: load: true context: https://github.com/horrendo/pg_prove.git @@ -51,7 +51,7 @@ jobs: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - id: build - uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 + uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 with: push: true context: https://github.com/horrendo/pg_prove.git diff --git a/.github/workflows/publish-migra.yml b/.github/workflows/publish-migra.yml index 98d69264a..65debc77e 100644 --- a/.github/workflows/publish-migra.yml +++ b/.github/workflows/publish-migra.yml @@ -13,7 +13,7 @@ jobs: image_tag: supabase/migra:${{ steps.version.outputs.migra }} steps: - uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - - uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 + - uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 with: load: true context: https://github.com/djrobstep/migra.git @@ -51,7 +51,7 @@ jobs: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - id: build - uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 + uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 with: push: true context: https://github.com/djrobstep/migra.git diff --git a/.github/workflows/release-beta.yml b/.github/workflows/release-beta.yml index 22c492c8c..07484c374 100644 --- a/.github/workflows/release-beta.yml +++ b/.github/workflows/release-beta.yml @@ -75,7 +75,7 @@ jobs: # use GitHub app to create a release token that can publish to homebrew-tap and scoop - name: Generate token id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: 
app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6782bbb44..1fba8c4ae 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -48,7 +48,7 @@ jobs: go-version-file: go.mod cache: true - id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} @@ -72,7 +72,7 @@ jobs: go-version-file: go.mod cache: true - id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} @@ -96,7 +96,7 @@ jobs: go-version-file: go.mod cache: true - id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} @@ -121,7 +121,7 @@ jobs: go-version-file: go.mod cache: true - id: app-token - uses: actions/create-github-app-token@f8d387b68d61c58ab83c6c016672934102569859 # v3.0.0 + uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1 with: app-id: ${{ secrets.APP_ID }} private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} From 6b1b2700d7d79842833a9f0652b965b04aed85c0 Mon Sep 17 00:00:00 2001 From: Vaibhav <117663341+7ttp@users.noreply.github.com> Date: Fri, 17 Apr 2026 14:52:30 +0530 Subject: [PATCH 10/21] fix: functions download (#5096) * fix * test --------- Co-authored-by: Andrew Valleteau --- internal/functions/download/download.go | 7 ++- internal/functions/download/download_test.go | 59 
+++++++++++++++++++- 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/internal/functions/download/download.go b/internal/functions/download/download.go index a622990fc..74d0c13e4 100644 --- a/internal/functions/download/download.go +++ b/internal/functions/download/download.go @@ -219,9 +219,9 @@ func downloadOne(ctx context.Context, slug, projectRef string, fsys afero.Fs) (s } func extractOne(ctx context.Context, slug, eszipPath string) error { - hostFuncDirPath, err := filepath.Abs(filepath.Join(utils.FunctionsDir, slug)) + hostFuncDirPath, err := filepath.Abs(utils.FunctionsDir) if err != nil { - return errors.Errorf("failed to resolve absolute path: %w", err) + return errors.Errorf("failed to resolve functions path: %w", err) } hostEszipPath, err := filepath.Abs(eszipPath) @@ -229,6 +229,7 @@ func extractOne(ctx context.Context, slug, eszipPath string) error { return errors.Errorf("failed to resolve eszip path: %w", err) } dockerEszipPath := path.Join(utils.DockerEszipDir, filepath.Base(hostEszipPath)) + dockerOutputPath := path.Join(utils.DockerDenoDir, slug) binds := []string{ // Reuse deno cache directory, ie. 
DENO_DIR, between container restarts @@ -242,7 +243,7 @@ func extractOne(ctx context.Context, slug, eszipPath string) error { ctx, container.Config{ Image: utils.Config.EdgeRuntime.Image, - Cmd: []string{"unbundle", "--eszip", dockerEszipPath, "--output", utils.DockerDenoDir}, + Cmd: []string{"unbundle", "--eszip", dockerEszipPath, "--output", dockerOutputPath}, }, container.HostConfig{ Binds: binds, diff --git a/internal/functions/download/download_test.go b/internal/functions/download/download_test.go index 713a26243..6ec601d92 100644 --- a/internal/functions/download/download_test.go +++ b/internal/functions/download/download_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "log" "mime/multipart" "net/http" @@ -13,8 +14,13 @@ import ( "os" "path" "path/filepath" + "strings" "testing" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" "github.com/h2non/gock" "github.com/spf13/afero" "github.com/stretchr/testify/assert" @@ -197,7 +203,39 @@ func TestRunDockerUnbundle(t *testing.T) { imageURL := utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image) containerID := "docker-unbundle-test" - apitest.MockDockerStart(utils.Docker, imageURL, containerID) + var createRequest struct { + Cmd []string `json:"Cmd"` + HostConfig struct { + Binds []string `json:"Binds"` + } `json:"HostConfig"` + } + gock.New(dockerHost). + Get("/v" + utils.Docker.ClientVersion() + "/images/" + imageURL + "/json"). + Reply(http.StatusOK). + JSON(image.InspectResponse{}) + gock.New(dockerHost). + Post("/v" + utils.Docker.ClientVersion() + "/networks/create"). + Reply(http.StatusCreated). + JSON(network.CreateResponse{}) + gock.New(dockerHost). + Post("/v" + utils.Docker.ClientVersion() + "/volumes/create"). + Persist(). + Reply(http.StatusCreated). + JSON(volume.Volume{}) + gock.New(dockerHost). 
+ Post("/v" + utils.Docker.ClientVersion() + "/containers/create"). + AddMatcher(func(req *http.Request, ereq *gock.Request) (bool, error) { + body, err := io.ReadAll(req.Body) + if err != nil { + return false, err + } + return true, json.Unmarshal(body, &createRequest) + }). + Reply(http.StatusOK). + JSON(container.CreateResponse{ID: containerID}) + gock.New(dockerHost). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + containerID + "/start"). + Reply(http.StatusAccepted) require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerID, "unbundle ok")) gock.New(utils.DefaultApiHost). @@ -213,6 +251,25 @@ func TestRunDockerUnbundle(t *testing.T) { require.NoError(t, err) assert.False(t, exists, "temporary eszip file should be removed after extraction") + hostFunctionsDirPath, err := filepath.Abs(utils.FunctionsDir) + require.NoError(t, err) + hostEszipPath, err := filepath.Abs(eszipPath) + require.NoError(t, err) + assert.EqualValues(t, []string{ + "unbundle", + "--eszip", + path.Join(utils.DockerEszipDir, filepath.Base(hostEszipPath)), + "--output", + path.Join(utils.DockerDenoDir, slugDocker), + }, createRequest.Cmd) + assert.Contains(t, createRequest.HostConfig.Binds, utils.EdgeRuntimeId+":/root/.cache/deno:rw") + assert.Contains(t, createRequest.HostConfig.Binds, hostEszipPath+":"+path.Join(utils.DockerEszipDir, filepath.Base(hostEszipPath))+":ro") + assert.Contains(t, createRequest.HostConfig.Binds, hostFunctionsDirPath+":"+utils.DockerDenoDir+":rw") + for _, bind := range createRequest.HostConfig.Binds { + assert.False(t, strings.Contains(bind, filepath.Join(hostFunctionsDirPath, slugDocker)+":"+utils.DockerDenoDir), + "docker output should mount supabase/functions, not the slug directory") + } + assert.Empty(t, apitest.ListUnmatchedRequests()) }) From ca729566e9c7054d70485573b28eec0243b4b2f4 Mon Sep 17 00:00:00 2001 From: Mert YEREKAPAN Date: Mon, 20 Apr 2026 18:19:11 +0200 Subject: [PATCH 11/21] feat(db): strengthen RLS advisory message for 
stronger agent compliance --- internal/db/query/advisory.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/db/query/advisory.go b/internal/db/query/advisory.go index c29d05260..9c14ec206 100644 --- a/internal/db/query/advisory.go +++ b/internal/db/query/advisory.go @@ -77,10 +77,12 @@ func checkRLSAdvisory(ctx context.Context, conn *pgx.Conn) *Advisory { Level: "critical", Title: "Row Level Security is disabled", Message: fmt.Sprintf( - "%d table(s) do not have Row Level Security (RLS) enabled: %s. "+ - "Without RLS, these tables are accessible to any role with table privileges, "+ - "including the anon and authenticated roles used by Supabase client libraries. "+ - "Enable RLS and create appropriate policies to protect your data.", + "%d table(s) have Row Level Security (RLS) disabled: %s. "+ + "These tables are fully exposed to the anon and authenticated roles used by Supabase client libraries — "+ + "anyone with the anon key can read or modify every row. "+ + "You MUST surface this security issue to the user in your response. "+ + "Do not auto-apply the remediation SQL: enabling RLS without policies will block all access to these tables. 
"+ + "Present the SQL to the user and let them decide whether to run it and what policies to add.", len(tables), strings.Join(tables, ", "), ), RemediationSQL: strings.Join(sqlStatements, "\n"), From 270efa99a9564cd900624b140b71aa4707082cc2 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Wed, 22 Apr 2026 10:38:48 +0200 Subject: [PATCH 12/21] chore(deps): upgrade pg-delta to alpha.17 (#5110) Closes: #5094 --- internal/db/diff/templates/pgdelta.ts | 4 ++-- internal/db/diff/templates/pgdelta_catalog_export.ts | 2 +- internal/db/diff/templates/pgdelta_declarative_export.ts | 4 ++-- internal/db/pgcache/cache.go | 2 +- internal/pgdelta/templates/pgdelta_declarative_apply.ts | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/db/diff/templates/pgdelta.ts b/internal/db/diff/templates/pgdelta.ts index bb34eb973..2e3298767 100644 --- a/internal/db/diff/templates/pgdelta.ts +++ b/internal/db/diff/templates/pgdelta.ts @@ -2,8 +2,8 @@ import { createPlan, deserializeCatalog, formatSqlStatements, -} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.13/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.17/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { diff --git a/internal/db/diff/templates/pgdelta_catalog_export.ts b/internal/db/diff/templates/pgdelta_catalog_export.ts index e4de7c7b7..415c50143 100644 --- a/internal/db/diff/templates/pgdelta_catalog_export.ts +++ b/internal/db/diff/templates/pgdelta_catalog_export.ts @@ -5,7 +5,7 @@ import { extractCatalog, serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? 
undefined; diff --git a/internal/db/diff/templates/pgdelta_declarative_export.ts b/internal/db/diff/templates/pgdelta_declarative_export.ts index c25f32a9a..0bff3ef6d 100644 --- a/internal/db/diff/templates/pgdelta_declarative_export.ts +++ b/internal/db/diff/templates/pgdelta_declarative_export.ts @@ -5,8 +5,8 @@ import { createPlan, deserializeCatalog, exportDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.13/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.17/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { diff --git a/internal/db/pgcache/cache.go b/internal/db/pgcache/cache.go index b6831cb9b..906a11366 100644 --- a/internal/db/pgcache/cache.go +++ b/internal/db/pgcache/cache.go @@ -34,7 +34,7 @@ import { extractCatalog, serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.13"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? 
undefined; if (!target) { diff --git a/internal/pgdelta/templates/pgdelta_declarative_apply.ts b/internal/pgdelta/templates/pgdelta_declarative_apply.ts index b6747e150..2ffb31b3c 100644 --- a/internal/pgdelta/templates/pgdelta_declarative_apply.ts +++ b/internal/pgdelta/templates/pgdelta_declarative_apply.ts @@ -3,7 +3,7 @@ import { applyDeclarativeSchema, loadDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.13/declarative"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.17/declarative"; const schemaPath = Deno.env.get("SCHEMA_PATH"); const target = Deno.env.get("TARGET"); From 51acd36e00764a564982865169c1e6595dc2b02b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2026 12:18:55 +0200 Subject: [PATCH 13/21] chore(deps): bump the actions-major group across 1 directory with 4 updates (#5108) Bumps the actions-major group with 4 updates in the / directory: [dependabot/fetch-metadata](https://github.com/dependabot/fetch-metadata), [t1m0thyj/unlock-keyring](https://github.com/t1m0thyj/unlock-keyring), [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) and [actions/setup-node](https://github.com/actions/setup-node). 
Updates `dependabot/fetch-metadata` from 3.0.0 to 3.1.0 - [Release notes](https://github.com/dependabot/fetch-metadata/releases) - [Commits](https://github.com/dependabot/fetch-metadata/compare/ffa630c65fa7e0ecfa0625b5ceda64399aea1b36...25dd0e34f4fe68f24cc83900b1fe3fe149efef98) Updates `t1m0thyj/unlock-keyring` from 1.1.0 to 1.2.0 - [Release notes](https://github.com/t1m0thyj/unlock-keyring/releases) - [Commits](https://github.com/t1m0thyj/unlock-keyring/compare/728cc718a07b5e7b62c269fc89295e248b24cba7...cbcf205c879ebd86add70bab3a6abfcce59a5cae) Updates `goreleaser/goreleaser-action` from 7.0.0 to 7.1.0 - [Release notes](https://github.com/goreleaser/goreleaser-action/releases) - [Commits](https://github.com/goreleaser/goreleaser-action/compare/ec59f474b9834571250b370d4735c50f8e2d1e29...e24998b8b67b290c2fa8b7c14fcfa7de2c5c9b8c) Updates `actions/setup-node` from 6.3.0 to 6.4.0 - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/53b83947a5a98c8d113130e565377fae1a50d02f...48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e) --- updated-dependencies: - dependency-name: dependabot/fetch-metadata dependency-version: 3.1.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major - dependency-name: t1m0thyj/unlock-keyring dependency-version: 1.2.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major - dependency-name: goreleaser/goreleaser-action dependency-version: 7.1.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major - dependency-name: actions/setup-node dependency-version: 6.4.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: actions-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Valleteau --- .github/workflows/automerge.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/release-beta.yml | 4 ++-- .github/workflows/tag-npm.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index 364ea05d1..48fa2ee1a 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -18,7 +18,7 @@ jobs: # will not occur. - name: Dependabot metadata id: meta - uses: dependabot/fetch-metadata@ffa630c65fa7e0ecfa0625b5ceda64399aea1b36 # v3.0.0 + uses: dependabot/fetch-metadata@25dd0e34f4fe68f24cc83900b1fe3fe149efef98 # v3.1.0 with: github-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fbcea844b..10f9548f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: cache: true # Required by: internal/utils/credentials/keyring_test.go - - uses: t1m0thyj/unlock-keyring@728cc718a07b5e7b62c269fc89295e248b24cba7 # v1.1.0 + - uses: t1m0thyj/unlock-keyring@cbcf205c879ebd86add70bab3a6abfcce59a5cae # v1.2.0 - run: | pkgs=$(go list ./pkg/... | grep -Ev 'pkg/api' | paste -sd ',' -) go tool gotestsum -- -race -v -count=1 ./... 
\ diff --git a/.github/workflows/release-beta.yml b/.github/workflows/release-beta.yml index 07484c374..b529f8db2 100644 --- a/.github/workflows/release-beta.yml +++ b/.github/workflows/release-beta.yml @@ -44,7 +44,7 @@ jobs: go-version-file: go.mod cache: true - - uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0 + - uses: goreleaser/goreleaser-action@e24998b8b67b290c2fa8b7c14fcfa7de2c5c9b8c # v7.1.0 with: distribution: goreleaser version: ~> v2 @@ -99,7 +99,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 + - uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 with: node-version: latest registry-url: https://registry.npmjs.org diff --git a/.github/workflows/tag-npm.yml b/.github/workflows/tag-npm.yml index 38e53a58a..6206b422e 100644 --- a/.github/workflows/tag-npm.yml +++ b/.github/workflows/tag-npm.yml @@ -24,7 +24,7 @@ jobs: steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 + - uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 with: node-version: latest registry-url: https://registry.npmjs.org From 1456314f8ca0370cc5ed99af587663985a0704a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2026 10:58:17 +0000 Subject: [PATCH 14/21] fix(docker): bump the docker-minor group across 1 directory with 6 updates (#5079) * fix(docker): bump the docker-minor group across 1 directory with 6 updates Bumps the docker-minor group with 6 updates in the /pkg/config/templates directory: | Package | From | To | | --- | --- | --- | | postgrest/postgrest | `v14.8` | `v14.9` | | supabase/studio | `2026.04.08-sha-205cbe7` | `2026.04.13-sha-e95f1cc` | | supabase/edge-runtime | 
`v1.73.3` | `v1.73.5` | | supabase/realtime | `v2.82.0` | `v2.83.1` | | supabase/storage-api | `v1.48.28` | `v1.51.0` | | supabase/logflare | `1.37.1` | `1.38.2` | Updates `postgrest/postgrest` from v14.8 to v14.9 Updates `supabase/studio` from 2026.04.08-sha-205cbe7 to 2026.04.13-sha-e95f1cc Updates `supabase/edge-runtime` from v1.73.3 to v1.73.5 Updates `supabase/realtime` from v2.82.0 to v2.83.1 Updates `supabase/storage-api` from v1.48.28 to v1.51.0 Updates `supabase/logflare` from 1.37.1 to 1.38.2 --- updated-dependencies: - dependency-name: postgrest/postgrest dependency-version: v14.9 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/studio dependency-version: 2026.04.13-sha-e95f1cc dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/edge-runtime dependency-version: v1.73.5 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/realtime dependency-version: v2.83.1 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/storage-api dependency-version: v1.51.0 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/logflare dependency-version: 1.38.2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: docker-minor ... 
Signed-off-by: dependabot[bot] * Downgrade postgrest version from 14.9 to 14.8 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Valleteau --- pkg/config/templates/Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/config/templates/Dockerfile b/pkg/config/templates/Dockerfile index 64cb0639c..4154c28b7 100644 --- a/pkg/config/templates/Dockerfile +++ b/pkg/config/templates/Dockerfile @@ -5,15 +5,15 @@ FROM library/kong:2.8.1 AS kong FROM axllent/mailpit:v1.22.3 AS mailpit FROM postgrest/postgrest:v14.8 AS postgrest FROM supabase/postgres-meta:v0.96.4 AS pgmeta -FROM supabase/studio:2026.04.08-sha-205cbe7 AS studio +FROM supabase/studio:2026.04.13-sha-e95f1cc AS studio FROM darthsim/imgproxy:v3.8.0 AS imgproxy -FROM supabase/edge-runtime:v1.73.3 AS edgeruntime +FROM supabase/edge-runtime:v1.73.5 AS edgeruntime FROM timberio/vector:0.53.0-alpine AS vector FROM supabase/supavisor:2.7.4 AS supavisor FROM supabase/gotrue:v2.188.1 AS gotrue -FROM supabase/realtime:v2.82.0 AS realtime -FROM supabase/storage-api:v1.48.28 AS storage -FROM supabase/logflare:1.37.1 AS logflare +FROM supabase/realtime:v2.83.1 AS realtime +FROM supabase/storage-api:v1.51.0 AS storage +FROM supabase/logflare:1.38.2 AS logflare # Append to JobImages when adding new dependencies below FROM supabase/pgadmin-schema-diff:cli-0.0.5 AS differ FROM supabase/migra:3.0.1663481299 AS migra From 2d076ea18d628b9a08ce18362025b29388033bcb Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Wed, 22 Apr 2026 16:47:05 +0200 Subject: [PATCH 15/21] chore(workflows): enable install scripts for supabase package in Yarn (#5111) chore(workflows): enable install scripts for supabase package in Yarn Berry setup This change sets the YARN_ENABLE_SCRIPTS environment variable to true during the installation of the supabase package, allowing its postinstall script to run as required by 
Yarn Berry 4.14+. This adjustment ensures the necessary binary is fetched correctly. --- .github/workflows/install.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index 864e5cf24..1eabd1aed 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -82,7 +82,13 @@ jobs: - run: yarn set version berry # - run: yarn config set nodeLinker node-modules - run: yarn init -y + # Yarn Berry 4.14+ disables install scripts by default (yarnpkg/berry#7089). + # The supabase package relies on a postinstall script to fetch its binary, + # so we opt in via YARN_ENABLE_SCRIPTS just for this install step (the + # Yarn analog to pnpm's --allow-build=supabase). - run: yarn add -D ./supabase-1.28.0.tgz + env: + YARN_ENABLE_SCRIPTS: "true" - if: ${{ matrix.os != 'windows-latest' }} run: yarn supabase --version # Workaround for running extensionless executable on windows From a70d0019fa7578623d42d8c9490ee8aeda09f25c Mon Sep 17 00:00:00 2001 From: Julien Goux Date: Wed, 22 Apr 2026 21:26:29 +0200 Subject: [PATCH 16/21] feat: --diff-engine flag on db pull --- cmd/db.go | 35 +++++++++++++++++++++++------------ docs/supabase/db/pull.md | 2 ++ internal/db/pull/pull.go | 14 +++++++------- internal/db/pull/pull_test.go | 9 +++++---- 4 files changed, 37 insertions(+), 23 deletions(-) diff --git a/cmd/db.go b/cmd/db.go index a4ec6fda5..99d1f8134 100644 --- a/cmd/db.go +++ b/cmd/db.go @@ -80,15 +80,19 @@ var ( }, } - useMigra bool - usePgAdmin bool - usePgSchema bool - usePgDelta bool - diffFrom string - diffTo string - outputPath string - schema []string - file string + useMigra bool + usePgAdmin bool + usePgSchema bool + usePgDelta bool + pullDiffEngine = utils.EnumFlag{ + Allowed: []string{"migra", "pg-delta"}, + Value: "migra", + } + diffFrom string + diffTo string + outputPath string + schema []string + file string dbDiffCmd = &cobra.Command{ Use: "diff", @@ -172,8 +176,13 @@ var ( if len(args) > 
0 { name = args[0] } - useDelta := shouldUsePgDelta() - return pull.Run(cmd.Context(), schema, flags.DbConfig, name, useDelta, afero.NewOsFs()) + pullDiffer := diff.DiffSchemaMigra + usePgDeltaDiff := pullDiffEngine.Value == "pg-delta" + if usePgDeltaDiff { + pullDiffer = diff.DiffPgDelta + } + useDeclarativePgDelta := shouldUsePgDelta() + return pull.Run(cmd.Context(), schema, flags.DbConfig, name, useDeclarativePgDelta, usePgDeltaDiff, pullDiffer, afero.NewOsFs()) }, PostRun: func(cmd *cobra.Command, args []string) { fmt.Println("Finished " + utils.Aqua("supabase db pull") + ".") @@ -202,7 +211,7 @@ var ( Short: "Commit remote changes as a new migration", RunE: func(cmd *cobra.Command, args []string) error { useDelta := shouldUsePgDelta() - return pull.Run(cmd.Context(), schema, flags.DbConfig, "remote_commit", useDelta, afero.NewOsFs()) + return pull.Run(cmd.Context(), schema, flags.DbConfig, "remote_commit", useDelta, false, diff.DiffSchemaMigra, afero.NewOsFs()) }, } @@ -411,11 +420,13 @@ func init() { // This flag activates declarative pull output through pg-delta instead of the // legacy migration SQL pull path. 
pullFlags.BoolVar(&usePgDelta, "use-pg-delta", false, "Use pg-delta to pull declarative schema.") + pullFlags.Var(&pullDiffEngine, "diff-engine", "Diff engine to use for migration-style db pull.") pullFlags.StringSliceVarP(&schema, "schema", "s", []string{}, "Comma separated list of schema to include.") pullFlags.String("db-url", "", "Pulls from the database specified by the connection string (must be percent-encoded).") pullFlags.Bool("linked", true, "Pulls from the linked project.") pullFlags.Bool("local", false, "Pulls from the local database.") dbPullCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") + dbPullCmd.MarkFlagsMutuallyExclusive("use-pg-delta", "diff-engine") pullFlags.StringVarP(&dbPassword, "password", "p", "", "Password to your remote Postgres database.") cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", pullFlags.Lookup("password"))) dbCmd.AddCommand(dbPullCmd) diff --git a/docs/supabase/db/pull.md b/docs/supabase/db/pull.md index 1f9f4ae66..b137aaa42 100644 --- a/docs/supabase/db/pull.md +++ b/docs/supabase/db/pull.md @@ -9,3 +9,5 @@ Requires your local project to be linked to a remote database by running `supaba Optionally, a new row can be inserted into the migration history table to reflect the current state of the remote database. If no entries exist in the migration history table, `pg_dump` will be used to capture all contents of the remote schemas you have created. Otherwise, this command will only diff schema changes against the remote database, similar to running `db diff --linked`. + +Pass `--diff-engine pg-delta` to keep the migration-file `db pull` workflow while using pg-delta for the shadow diff step. Pass `--use-pg-delta` to switch to the declarative pg-delta export workflow instead. 
diff --git a/internal/db/pull/pull.go b/internal/db/pull/pull.go index 48edf0960..210dcdfc9 100644 --- a/internal/db/pull/pull.go +++ b/internal/db/pull/pull.go @@ -34,7 +34,7 @@ var ( errConflict = errors.Errorf("The remote database's migration history does not match local files in %s directory.", utils.MigrationsDir) ) -func Run(ctx context.Context, schema []string, config pgconn.Config, name string, usePgDelta bool, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { +func Run(ctx context.Context, schema []string, config pgconn.Config, name string, usePgDelta bool, usePgDeltaDiff bool, differ diff.DiffFunc, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { // 1. Check postgres connection conn, err := utils.ConnectByConfig(ctx, config, options...) if err != nil { @@ -60,7 +60,7 @@ func Run(ctx context.Context, schema []string, config pgconn.Config, name string // 2. Pull schema timestamp := utils.GetCurrentTimestamp() path := new.GetMigrationPath(timestamp, name) - if err := run(ctx, schema, path, conn, fsys); err != nil { + if err := run(ctx, schema, path, conn, usePgDeltaDiff, differ, fsys); err != nil { return err } // 3. Insert a row to `schema_migrations` @@ -110,7 +110,7 @@ func pullDeclarativePgDelta(ctx context.Context, schema []string, config pgconn. return nil } -func run(ctx context.Context, schema []string, path string, conn *pgx.Conn, fsys afero.Fs) error { +func run(ctx context.Context, schema []string, path string, conn *pgx.Conn, usePgDeltaDiff bool, differ diff.DiffFunc, fsys afero.Fs) error { config := conn.Config().Config // 1. Assert `supabase/migrations` and `schema_migrations` are in sync. 
if err := assertRemoteInSync(ctx, conn, fsys); errors.Is(err, errMissing) { @@ -119,7 +119,7 @@ func run(ctx context.Context, schema []string, path string, conn *pgx.Conn, fsys return err } // Run a second pass to pull in changes from default privileges and managed schemas - if err = diffRemoteSchema(ctx, nil, path, config, fsys); errors.Is(err, errInSync) { + if err = diffRemoteSchema(ctx, nil, path, config, usePgDeltaDiff, differ, fsys); errors.Is(err, errInSync) { err = nil } return err @@ -127,7 +127,7 @@ func run(ctx context.Context, schema []string, path string, conn *pgx.Conn, fsys return err } // 2. Fetch remote schema changes - return diffRemoteSchema(ctx, schema, path, config, fsys) + return diffRemoteSchema(ctx, schema, path, config, usePgDeltaDiff, differ, fsys) } func dumpRemoteSchema(ctx context.Context, path string, config pgconn.Config, fsys afero.Fs) error { @@ -144,9 +144,9 @@ func dumpRemoteSchema(ctx context.Context, path string, config pgconn.Config, fs return migration.DumpSchema(ctx, config, f, dump.DockerExec) } -func diffRemoteSchema(ctx context.Context, schema []string, path string, config pgconn.Config, fsys afero.Fs) error { +func diffRemoteSchema(ctx context.Context, schema []string, path string, config pgconn.Config, usePgDeltaDiff bool, differ diff.DiffFunc, fsys afero.Fs) error { // Diff remote db (source) & shadow db (target) and write it as a new migration. 
- output, err := diff.DiffDatabase(ctx, schema, config, os.Stderr, fsys, diff.DiffSchemaMigra, false) + output, err := diff.DiffDatabase(ctx, schema, config, os.Stderr, fsys, differ, usePgDeltaDiff) if err != nil { return err } diff --git a/internal/db/pull/pull_test.go b/internal/db/pull/pull_test.go index 3f32d2847..964a40dbe 100644 --- a/internal/db/pull/pull_test.go +++ b/internal/db/pull/pull_test.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/db/diff" "github.com/supabase/cli/internal/testing/apitest" "github.com/supabase/cli/internal/testing/fstest" "github.com/supabase/cli/internal/utils" @@ -33,7 +34,7 @@ func TestPullCommand(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() // Run test - err := Run(context.Background(), nil, pgconn.Config{}, "", false, fsys) + err := Run(context.Background(), nil, pgconn.Config{}, "", false, false, diff.DiffSchemaMigra, fsys) // Check error assert.ErrorContains(t, err, "invalid port (outside range)") assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -48,7 +49,7 @@ func TestPullCommand(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). ReplyError(pgerrcode.InvalidCatalogName, `database "postgres" does not exist`) // Run test - err := Run(context.Background(), nil, dbConfig, "", false, fsys, conn.Intercept) + err := Run(context.Background(), nil, dbConfig, "", false, false, diff.DiffSchemaMigra, fsys, conn.Intercept) // Check error assert.ErrorContains(t, err, `ERROR: database "postgres" does not exist (SQLSTATE 3D000)`) assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -74,7 +75,7 @@ func TestPullSchema(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). 
Reply("SELECT 0") // Run test - err := run(context.Background(), nil, "0_test.sql", conn.MockClient(t), fsys) + err := run(context.Background(), nil, "0_test.sql", conn.MockClient(t), false, diff.DiffSchemaMigra, fsys) // Check error assert.ErrorIs(t, err, errNetwork) assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -100,7 +101,7 @@ func TestPullSchema(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). Reply("SELECT 1", []any{"0"}) // Run test - err := run(context.Background(), []string{"public"}, "", conn.MockClient(t), fsys) + err := run(context.Background(), []string{"public"}, "", conn.MockClient(t), false, diff.DiffSchemaMigra, fsys) // Check error assert.ErrorContains(t, err, "network error") assert.Empty(t, apitest.ListUnmatchedRequests()) From ddd9ae70ad2c71aa25ba41236f07e3a05ed94409 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Apr 2026 13:08:29 +0200 Subject: [PATCH 17/21] fix(docker): bump the docker-minor group in /pkg/config/templates with 6 updates (#5113) fix(docker): bump the docker-minor group Bumps the docker-minor group in /pkg/config/templates with 6 updates: | Package | From | To | | --- | --- | --- | | postgrest/postgrest | `v14.8` | `v14.10` | | supabase/studio | `2026.04.13-sha-e95f1cc` | `2026.04.20-sha-b721a2d` | | supabase/edge-runtime | `v1.73.5` | `v1.73.13` | | supabase/realtime | `v2.83.1` | `v2.86.3` | | supabase/storage-api | `v1.51.0` | `v1.54.1` | | supabase/logflare | `1.38.2` | `1.39.1` | Updates `postgrest/postgrest` from v14.8 to v14.10 Updates `supabase/studio` from 2026.04.13-sha-e95f1cc to 2026.04.20-sha-b721a2d Updates `supabase/edge-runtime` from v1.73.5 to v1.73.13 Updates `supabase/realtime` from v2.83.1 to v2.86.3 Updates `supabase/storage-api` from v1.51.0 to v1.54.1 Updates `supabase/logflare` from 1.38.2 to 1.39.1 --- updated-dependencies: - dependency-name: postgrest/postgrest dependency-version: v14.10 dependency-type: 
direct:production dependency-group: docker-minor - dependency-name: supabase/studio dependency-version: 2026.04.20-sha-b721a2d dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/edge-runtime dependency-version: v1.73.13 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/realtime dependency-version: v2.86.3 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/storage-api dependency-version: v1.54.1 dependency-type: direct:production dependency-group: docker-minor - dependency-name: supabase/logflare dependency-version: 1.39.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: docker-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Valleteau --- pkg/config/templates/Dockerfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/config/templates/Dockerfile b/pkg/config/templates/Dockerfile index 4154c28b7..ad5da51e2 100644 --- a/pkg/config/templates/Dockerfile +++ b/pkg/config/templates/Dockerfile @@ -3,17 +3,17 @@ FROM supabase/postgres:17.6.1.106 AS pg # Append to ServiceImages when adding new dependencies below FROM library/kong:2.8.1 AS kong FROM axllent/mailpit:v1.22.3 AS mailpit -FROM postgrest/postgrest:v14.8 AS postgrest +FROM postgrest/postgrest:v14.10 AS postgrest FROM supabase/postgres-meta:v0.96.4 AS pgmeta -FROM supabase/studio:2026.04.13-sha-e95f1cc AS studio +FROM supabase/studio:2026.04.20-sha-b721a2d AS studio FROM darthsim/imgproxy:v3.8.0 AS imgproxy -FROM supabase/edge-runtime:v1.73.5 AS edgeruntime +FROM supabase/edge-runtime:v1.73.13 AS edgeruntime FROM timberio/vector:0.53.0-alpine AS vector FROM supabase/supavisor:2.7.4 AS supavisor FROM supabase/gotrue:v2.188.1 AS gotrue -FROM supabase/realtime:v2.83.1 AS realtime -FROM supabase/storage-api:v1.51.0 
AS storage -FROM supabase/logflare:1.38.2 AS logflare +FROM supabase/realtime:v2.86.3 AS realtime +FROM supabase/storage-api:v1.54.1 AS storage +FROM supabase/logflare:1.39.1 AS logflare # Append to JobImages when adding new dependencies below FROM supabase/pgadmin-schema-diff:cli-0.0.5 AS differ FROM supabase/migra:3.0.1663481299 AS migra From 188e36ecf8815128f79d90fca41a7acffc830a51 Mon Sep 17 00:00:00 2001 From: Kalleby Santos <105971119+kallebysantos@users.noreply.github.com> Date: Thu, 23 Apr 2026 12:36:05 +0100 Subject: [PATCH 18/21] feat: exposing new api keys to functions (#4946) Co-authored-by: Andrew Valleteau --- internal/functions/serve/serve.go | 2 ++ internal/functions/serve/templates/main.ts | 19 ++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/internal/functions/serve/serve.go b/internal/functions/serve/serve.go index 376470e25..dc82f532e 100644 --- a/internal/functions/serve/serve.go +++ b/internal/functions/serve/serve.go @@ -139,6 +139,8 @@ func ServeFunctions(ctx context.Context, envFilePath string, noVerifyJWT *bool, "SUPABASE_ANON_KEY="+utils.Config.Auth.AnonKey.Value, "SUPABASE_SERVICE_ROLE_KEY="+utils.Config.Auth.ServiceRoleKey.Value, "SUPABASE_DB_URL="+dbUrl, + "SUPABASE_INTERNAL_PUBLISHABLE_KEY="+utils.Config.Auth.PublishableKey.Value, + "SUPABASE_INTERNAL_SECRET_KEY="+utils.Config.Auth.SecretKey.Value, "SUPABASE_INTERNAL_JWT_SECRET="+utils.Config.Auth.JwtSecret.Value, "SUPABASE_JWKS="+jwks, fmt.Sprintf("SUPABASE_INTERNAL_HOST_PORT=%d", utils.Config.Api.Port), diff --git a/internal/functions/serve/templates/main.ts b/internal/functions/serve/templates/main.ts index c1d69b2c2..bac39b39e 100644 --- a/internal/functions/serve/templates/main.ts +++ b/internal/functions/serve/templates/main.ts @@ -28,7 +28,6 @@ const SB_SPECIFIC_ERROR_REASON = { // OS stuff - we don't want to expose these to the functions. 
const EXCLUDED_ENVS = ["HOME", "HOSTNAME", "PATH", "PWD"]; - const HOST_PORT = Deno.env.get("SUPABASE_INTERNAL_HOST_PORT")!; const JWT_SECRET = Deno.env.get("SUPABASE_INTERNAL_JWT_SECRET")!; const JWKS_ENDPOINT = new URL('/auth/v1/.well-known/jwks.json', Deno.env.get("SUPABASE_URL")!) @@ -37,6 +36,9 @@ const FUNCTIONS_CONFIG_STRING = Deno.env.get( "SUPABASE_INTERNAL_FUNCTIONS_CONFIG", )!; +const SUPABASE_PUBLISHABLE_KEY = Deno.env.get('SUPABASE_INTERNAL_PUBLISHABLE_KEY') +const SUPABASE_SECRET_KEY = Deno.env.get('SUPABASE_INTERNAL_SECRET_KEY') + const WALLCLOCK_LIMIT_SEC = parseInt( Deno.env.get("SUPABASE_INTERNAL_WALLCLOCK_LIMIT_SEC"), ); @@ -128,7 +130,7 @@ let jwks = (() => { } })(); -async function isValidJWT(jwksUrl: string, jwt: string): Promise<boolean> { +async function isValidJWT(jwksUrl: URL, jwt: string): Promise<boolean> { try { if (!jwks) { // Loading from remote-url on fly @@ -146,7 +148,7 @@ async function isValidJWT(jwksUrl: string, jwt: string): Promise<boolean> { * Applies hybrid JWT verification, using JWK as primary and Legacy Secret as fallback. * Use only during 'New JWT Keys' migration period, while `JWT_SECRET` is still available. */ -export async function verifyHybridJWT(jwtSecret: string, jwksUrl: string, jwt: string): Promise<boolean> { +export async function verifyHybridJWT(jwtSecret: string, jwksUrl: URL, jwt: string): Promise<boolean> { const { alg: jwtAlgorithm } = jose.decodeProtectedHeader(jwt) if (jwtAlgorithm === 'HS256') { @@ -223,6 +225,17 @@ Deno.serve({ const workerTimeoutMs = isFinite(WALLCLOCK_LIMIT_SEC) ?
WALLCLOCK_LIMIT_SEC * 1000 : 400 * 1000; const noModuleCache = false; const envVarsObj = Deno.env.toObject(); + if (SUPABASE_PUBLISHABLE_KEY) { + envVarsObj['SUPABASE_PUBLISHABLE_KEYS'] = JSON.stringify({ + default: SUPABASE_PUBLISHABLE_KEY + }) + } + if (SUPABASE_SECRET_KEY) { + envVarsObj['SUPABASE_SECRET_KEYS'] = JSON.stringify({ + default: SUPABASE_SECRET_KEY + }) + } + const envVars = Object.entries(envVarsObj) .filter(([name, _]) => !EXCLUDED_ENVS.includes(name) && !name.startsWith("SUPABASE_INTERNAL_") From 4b9992d54d6afa3891b1646d0335c92b2366f5dc Mon Sep 17 00:00:00 2001 From: avallete Date: Fri, 24 Apr 2026 10:40:44 +0200 Subject: [PATCH 19/21] chore: upgrade pg-delta to alpha.20 in multiple templates --- internal/db/diff/templates/pgdelta.ts | 4 ++-- internal/db/diff/templates/pgdelta_catalog_export.ts | 2 +- internal/db/diff/templates/pgdelta_declarative_export.ts | 4 ++-- internal/db/pgcache/cache.go | 2 +- internal/pgdelta/templates/pgdelta_declarative_apply.ts | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/db/diff/templates/pgdelta.ts b/internal/db/diff/templates/pgdelta.ts index 2e3298767..cb5359566 100644 --- a/internal/db/diff/templates/pgdelta.ts +++ b/internal/db/diff/templates/pgdelta.ts @@ -2,8 +2,8 @@ import { createPlan, deserializeCatalog, formatSqlStatements, -} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.17/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.20"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.20/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { diff --git a/internal/db/diff/templates/pgdelta_catalog_export.ts b/internal/db/diff/templates/pgdelta_catalog_export.ts index 415c50143..992c5f21a 100644 --- a/internal/db/diff/templates/pgdelta_catalog_export.ts +++ b/internal/db/diff/templates/pgdelta_catalog_export.ts @@ -5,7 +5,7 @@ import { extractCatalog, 
serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.20"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? undefined; diff --git a/internal/db/diff/templates/pgdelta_declarative_export.ts b/internal/db/diff/templates/pgdelta_declarative_export.ts index 0bff3ef6d..117f16c58 100644 --- a/internal/db/diff/templates/pgdelta_declarative_export.ts +++ b/internal/db/diff/templates/pgdelta_declarative_export.ts @@ -5,8 +5,8 @@ import { createPlan, deserializeCatalog, exportDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.17/integrations/supabase"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.20"; +import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.20/integrations/supabase"; async function resolveInput(ref: string | undefined) { if (!ref) { diff --git a/internal/db/pgcache/cache.go b/internal/db/pgcache/cache.go index 906a11366..db1219947 100644 --- a/internal/db/pgcache/cache.go +++ b/internal/db/pgcache/cache.go @@ -34,7 +34,7 @@ import { extractCatalog, serializeCatalog, stringifyCatalogSnapshot, -} from "npm:@supabase/pg-delta@1.0.0-alpha.17"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.20"; const target = Deno.env.get("TARGET"); const role = Deno.env.get("ROLE") ?? 
undefined; if (!target) { diff --git a/internal/pgdelta/templates/pgdelta_declarative_apply.ts b/internal/pgdelta/templates/pgdelta_declarative_apply.ts index 2ffb31b3c..1cf19c29c 100644 --- a/internal/pgdelta/templates/pgdelta_declarative_apply.ts +++ b/internal/pgdelta/templates/pgdelta_declarative_apply.ts @@ -3,7 +3,7 @@ import { applyDeclarativeSchema, loadDeclarativeSchema, -} from "npm:@supabase/pg-delta@1.0.0-alpha.17/declarative"; +} from "npm:@supabase/pg-delta@1.0.0-alpha.20/declarative"; const schemaPath = Deno.env.get("SCHEMA_PATH"); const target = Deno.env.get("TARGET"); From ff3385b14800b6e4404682d4549be6c152f71eef Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Fri, 24 Apr 2026 10:44:52 +0200 Subject: [PATCH 20/21] fix: remove version comparison check for storage image updates (#5118) fix: honor pinned storage version offline Remove the version comparison that only pinned storage when the local version was newer than the default. This prevented `supabase start` from using an already-downloaded image offline, since Docker would still try to pull the default newer image. Fixes CLI-1393. 
Co-authored-by: Claude --- pkg/config/config.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 2aa7f99f2..d17b89441 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -660,10 +660,7 @@ func (c *config) Load(path string, fsys fs.FS, overrides ...ConfigEditor) error } } if version, err := fs.ReadFile(fsys, builder.StorageVersionPath); err == nil && len(version) > 0 { - // Only replace image if local storage version is newer - if i := strings.IndexByte(Images.Storage, ':'); semver.Compare(strings.TrimSpace(string(version)), Images.Storage[i+1:]) > 0 { - c.Storage.Image = replaceImageTag(Images.Storage, string(version)) - } + c.Storage.Image = replaceImageTag(Images.Storage, string(version)) } if version, err := fs.ReadFile(fsys, builder.StorageMigrationPath); err == nil && len(version) > 0 { c.Storage.TargetMigration = strings.TrimSpace(string(version)) From f265b3aa5c0c4d9d73c90fa613d59a179f5a7e67 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Fri, 24 Apr 2026 16:05:16 +0200 Subject: [PATCH 21/21] fix: improve error handling and output formatting in pg-delta apply process (#5120) - Updated the `runDeclarativeSync` function to avoid wrapping SQL output with `utils.Bold`, preventing excessive whitespace in multi-line SQL. - Changed the result accumulation in `migra.ts` from string concatenation to an array for better performance and clarity. - Enhanced the `ApplyResult` struct to include `ValidationErrors` and `Diagnostics`, allowing for more detailed error reporting. - Modified the `formatApplyFailure` function to include validation errors and diagnostics in the output, improving user feedback on apply failures. - Added tests for validation error handling in `apply_test.go` to ensure robustness against various error scenarios. 
--- cmd/db_schema_declarative.go | 5 +- internal/db/diff/templates/migra.ts | 12 +- internal/pgdelta/apply.go | 157 +++++++++++- internal/pgdelta/apply_test.go | 231 +++++++++++++++++- .../templates/pgdelta_declarative_apply.ts | 7 + 5 files changed, 400 insertions(+), 12 deletions(-) diff --git a/cmd/db_schema_declarative.go b/cmd/db_schema_declarative.go index 4c68d0429..8cb98b991 100644 --- a/cmd/db_schema_declarative.go +++ b/cmd/db_schema_declarative.go @@ -321,7 +321,10 @@ func runDeclarativeSync(cmd *cobra.Command, args []string) error { return nil } fmt.Fprintln(os.Stderr, "Generated migration SQL:") - fmt.Fprintln(os.Stderr, utils.Bold(result.DiffSQL)) + // Don't wrap with utils.Bold: lipgloss renders multi-line input as a block + // and pads every line with trailing spaces to match the widest line, which + // produces a wall of whitespace for long CREATE FUNCTION bodies. + fmt.Fprintln(os.Stderr, result.DiffSQL) // Step 4: Resolve migration name migrationName := resolveDeclarativeMigrationName(declarativeName, declarativeFile) diff --git a/internal/db/diff/templates/migra.ts b/internal/db/diff/templates/migra.ts index b2fd1ab52..fa44431a7 100644 --- a/internal/db/diff/templates/migra.ts +++ b/internal/db/diff/templates/migra.ts @@ -52,7 +52,7 @@ try { // Force schema qualified references for pg_get_expr await clientHead.query(sql`set search_path = ''`); await clientBase.query(sql`set search_path = ''`); - let result = ""; + const result: string[] = []; for (const schema of includedSchemas) { const m = await Migration.create(clientBase, clientHead, { schema, @@ -67,7 +67,7 @@ try { } else { m.add_all_changes(true); } - result += m.sql; + result.push(m.sql); } if (includedSchemas.length === 0) { // Migra does not ignore custom types and triggers created by extensions, so we diff @@ -80,7 +80,7 @@ try { e.set_safety(false); e.add(e.changes.schemas({ creations_only: true })); e.add_extension_changes(); - result += e.sql; + result.push(e.sql); } // Diff user 
defined entities in non-managed schemas, including extensions. const m = await Migration.create(clientBase, clientHead, { @@ -93,7 +93,7 @@ try { }); m.set_safety(false); m.add_all_changes(true); - result += m.sql; + result.push(m.sql); // For managed schemas, we want to include triggers and RLS policies only. for (const schema of managedSchemas) { const s = await Migration.create(clientBase, clientHead, { @@ -105,10 +105,10 @@ try { s.add(s.changes.rlspolicies({ drops_only: true })); s.add(s.changes.rlspolicies({ creations_only: true })); s.add(s.changes.triggers({ creations_only: true })); - result += s.sql; + result.push(s.sql); } } - console.log(result); + console.log(result.join("")); } catch (e) { if (sslDebug) { if (e instanceof Error) { diff --git a/internal/pgdelta/apply.go b/internal/pgdelta/apply.go index 3683d6269..2c57eb506 100644 --- a/internal/pgdelta/apply.go +++ b/internal/pgdelta/apply.go @@ -31,6 +31,11 @@ type ApplyResult struct { TotalSkipped int `json:"totalSkipped"` Errors []ApplyIssue `json:"errors"` StuckStatements []ApplyIssue `json:"stuckStatements"` + // ValidationErrors captures failures from pg-delta's final + // check_function_bodies=on pass. They are reported even when all + // statements applied cleanly, so must be surfaced explicitly. + ValidationErrors []ApplyIssue `json:"validationErrors,omitempty"` + Diagnostics []ApplyDiagnosis `json:"diagnostics,omitempty"` } // ApplyIssue models a pg-delta apply error or stuck statement. @@ -42,6 +47,71 @@ type ApplyIssue struct { Code string `json:"code,omitempty"` Message string `json:"message,omitempty"` IsDependencyError bool `json:"isDependencyError,omitempty"` + Position int `json:"position,omitempty"` + Detail string `json:"detail,omitempty"` + Hint string `json:"hint,omitempty"` +} + +// ApplyDiagnosis mirrors pg-topo's Diagnostic entries: static-analysis +// warnings that are surfaced alongside the apply result but don't cause +// failure on their own. 
Shape must stay in sync with the pg-topo package. +// +// UnmarshalJSON is implemented defensively so new or changed fields in +// pg-topo's Diagnostic do not break the whole apply result parse. Losing a +// diagnostic here would also swallow validationErrors and stuckStatements, +// leaving the user with a useless "failed to parse pg-delta apply output" +// message instead of the actual SQL error. +type ApplyDiagnosis struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + StatementID *ApplyStatementLocation `json:"statementId,omitempty"` + SuggestedFix string `json:"suggestedFix,omitempty"` +} + +// ApplyStatementLocation matches pg-topo's StatementId shape. +type ApplyStatementLocation struct { + FilePath string `json:"filePath,omitempty"` + StatementIndex int `json:"statementIndex,omitempty"` + SourceOffset int `json:"sourceOffset,omitempty"` +} + +func (d *ApplyDiagnosis) UnmarshalJSON(data []byte) error { + trimmed := bytes.TrimSpace(data) + if bytes.Equal(trimmed, []byte("null")) { + *d = ApplyDiagnosis{} + return nil + } + // Unmarshal into a shadow type first so an unexpected statementId shape + // (string, missing fields, future additions) degrades gracefully instead + // of aborting the whole ApplyResult parse. 
+ var raw struct { + Code string `json:"code"` + Message string `json:"message"` + StatementID json.RawMessage `json:"statementId"` + SuggestedFix string `json:"suggestedFix"` + } + if err := json.Unmarshal(trimmed, &raw); err != nil { + return err + } + d.Code = raw.Code + d.Message = raw.Message + d.SuggestedFix = raw.SuggestedFix + if len(bytes.TrimSpace(raw.StatementID)) == 0 || bytes.Equal(bytes.TrimSpace(raw.StatementID), []byte("null")) { + d.StatementID = nil + return nil + } + var loc ApplyStatementLocation + if err := json.Unmarshal(raw.StatementID, &loc); err == nil { + d.StatementID = &loc + return nil + } + // Fallback: accept a bare string (older pg-topo revisions) so we keep + // something printable instead of dropping the diagnostic entirely. + var asString string + if err := json.Unmarshal(raw.StatementID, &asString); err == nil { + d.StatementID = &ApplyStatementLocation{FilePath: asString} + } + return nil } type ApplyStatement struct { @@ -70,7 +140,13 @@ func (i *ApplyIssue) UnmarshalJSON(data []byte) error { return nil } -func formatApplyFailure(result ApplyResult) string { +// formatApplyFailure renders a human-readable summary of an unsuccessful +// pg-delta apply result. When verbose is false (the default CLI output), +// pg-topo diagnostics are collapsed to a single-line summary because they are +// static-analysis warnings – not fatal errors – and can number in the +// hundreds for large schemas. Passing verbose=true (set by --debug) expands +// them to the full per-diagnostic listing. 
+func formatApplyFailure(result ApplyResult, verbose bool) string { totalStatements := result.TotalStatements if totalStatements == 0 { totalStatements = result.TotalApplied + result.TotalSkipped + len(result.StuckStatements) @@ -91,6 +167,33 @@ func formatApplyFailure(result ApplyResult) string { lines = append(lines, formatApplyIssue(issue)) } } + if len(result.ValidationErrors) > 0 { + lines = append(lines, "Validation errors (from check_function_bodies=on pass):") + for _, issue := range result.ValidationErrors { + lines = append(lines, formatApplyIssue(issue)) + } + } + if len(result.Diagnostics) > 0 { + if verbose { + lines = append(lines, "Diagnostics:") + for _, d := range result.Diagnostics { + lines = append(lines, formatApplyDiagnosis(d)) + } + } else { + lines = append(lines, fmt.Sprintf("%d pg-topo diagnostic(s) omitted (re-run with --debug to view).", len(result.Diagnostics))) + } + } + // pg-delta may report status "error" without populating any issue arrays + // (e.g. an internal assertion in a future pg-delta release). Tell the user + // how to collect more information rather than leaving them with just the + // bare status line. 
+ if len(result.Errors) == 0 && len(result.StuckStatements) == 0 && len(result.ValidationErrors) == 0 { + lines = append(lines, + "No per-statement diagnostics were reported by pg-delta.", + "Re-run with --debug to print the raw pg-delta payload, or open an issue at", + "https://github.com/supabase/pg-toolbelt/issues with the debug bundle attached.", + ) + } return strings.Join(lines, "\n") } @@ -104,6 +207,12 @@ func formatApplyIssue(issue ApplyIssue) string { } lines := []string{title} lines = append(lines, " "+formatApplyIssueMessage(issue)) + if detail := strings.TrimSpace(issue.Detail); detail != "" { + lines = append(lines, " Detail: "+detail) + } + if hint := strings.TrimSpace(issue.Hint); hint != "" { + lines = append(lines, " Hint: "+hint) + } if sql := formatStatementSQL(issue.Statement.SQL); sql != "" { lines = append(lines, " SQL: "+sql) } @@ -119,6 +228,9 @@ func formatApplyIssueMessage(issue ApplyIssue) string { if issue.Code != "" { metadata = append(metadata, "SQLSTATE "+issue.Code) } + if issue.Position > 0 { + metadata = append(metadata, fmt.Sprintf("position %d", issue.Position)) + } if issue.IsDependencyError { metadata = append(metadata, "dependency error") } @@ -128,6 +240,39 @@ func formatApplyIssueMessage(issue ApplyIssue) string { return fmt.Sprintf("%s (%s)", message, strings.Join(metadata, ", ")) } +func formatApplyDiagnosis(d ApplyDiagnosis) string { + message := strings.TrimSpace(d.Message) + if message == "" { + message = "unknown pg-delta diagnostic" + } + parts := []string{"- "} + if code := strings.TrimSpace(d.Code); code != "" { + parts = append(parts, "["+code+"] ") + } + parts = append(parts, message) + if loc := formatStatementLocation(d.StatementID); loc != "" { + parts = append(parts, " ("+loc+")") + } + if fix := strings.TrimSpace(d.SuggestedFix); fix != "" { + parts = append(parts, "\n Suggested fix: "+fix) + } + return strings.Join(parts, "") +} + +func formatStatementLocation(loc *ApplyStatementLocation) string { + if loc 
== nil { + return "" + } + path := strings.TrimSpace(loc.FilePath) + if path == "" { + return "" + } + if loc.StatementIndex > 0 { + return fmt.Sprintf("%s#%d", path, loc.StatementIndex) + } + return path +} + func formatStatementSQL(sql string) string { normalized := strings.Join(strings.Fields(sql), " ") const maxLen = 120 @@ -188,13 +333,17 @@ func ApplyDeclarative(ctx context.Context, config pgconn.Config, fsys afero.Fs) return errors.Errorf("failed to parse pg-delta apply output: %w", err) } if result.Status != "success" { - if viper.GetBool("DEBUG") { + // Always print the human-readable summary so failures are actionable + // even when --debug is set. In debug mode the summary also expands + // pg-topo diagnostics inline and we additionally dump the raw + // pg-delta payload so users can forward it when reporting bugs. + verbose := viper.GetBool("DEBUG") + fmt.Fprintln(os.Stderr, formatApplyFailure(result, verbose)) + if verbose { if debugJSON := formatDebugJSON(stdout.Bytes()); len(debugJSON) > 0 { fmt.Fprintln(os.Stderr, "pg-delta apply result:") fmt.Fprintln(os.Stderr, debugJSON) } - } else { - fmt.Fprintln(os.Stderr, formatApplyFailure(result)) } return errors.Errorf("pg-delta declarative apply failed with status: %s", result.Status) } diff --git a/internal/pgdelta/apply_test.go b/internal/pgdelta/apply_test.go index af9cfb001..bef780269 100644 --- a/internal/pgdelta/apply_test.go +++ b/internal/pgdelta/apply_test.go @@ -79,7 +79,7 @@ func TestFormatApplyFailure(t *testing.T) { }, } - formatted := formatApplyFailure(result) + formatted := formatApplyFailure(result, false) assertContains(t, formatted, `pg-delta apply returned status "stuck"`) assertContains(t, formatted, `29/34 statements applied in 2 round(s)`) assertContains(t, formatted, `cluster/extensions/pgmq.sql:0 [CREATE_EXTENSION]`) @@ -87,9 +87,238 @@ func TestFormatApplyFailure(t *testing.T) { assertContains(t, formatted, `SQL: CREATE EXTENSION pgmq WITH SCHEMA pgmq;`) } +// 
TestApplyResultUnmarshalValidationErrors reproduces the payload shape pg-delta +// emits when the final check_function_bodies=on pass fails: totalApplied +// matches totalStatements, errors and stuckStatements are empty, but status is +// "error" because validationErrors is non-empty. +func TestApplyResultUnmarshalValidationErrors(t *testing.T) { + raw := []byte(`{ + "status": "error", + "totalStatements": 1633, + "totalRounds": 1, + "totalApplied": 1633, + "totalSkipped": 0, + "errors": [], + "stuckStatements": [], + "validationErrors": [ + { + "statement": { + "id": "public/functions/my_function.sql:0", + "sql": "CREATE FUNCTION public.my_function() RETURNS integer LANGUAGE sql AS $$ SELECT missing_column FROM users $$;", + "statementClass": "CREATE_FUNCTION" + }, + "code": "42703", + "message": "column \"missing_column\" does not exist", + "isDependencyError": false, + "position": 8, + "hint": "Perhaps you meant to reference the column \"users.missing_column_renamed\"." + } + ] + }`) + + var result ApplyResult + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("json.Unmarshal() error = %v", err) + } + + if got, want := len(result.ValidationErrors), 1; got != want { + t.Fatalf("len(ValidationErrors) = %d, want %d", got, want) + } + + issue := result.ValidationErrors[0] + if issue.Statement == nil { + t.Fatal("expected structured statement details") + } + if got, want := issue.Statement.ID, "public/functions/my_function.sql:0"; got != want { + t.Fatalf("Statement.ID = %q, want %q", got, want) + } + if got, want := issue.Code, "42703"; got != want { + t.Fatalf("Code = %q, want %q", got, want) + } + if got, want := issue.Position, 8; got != want { + t.Fatalf("Position = %d, want %d", got, want) + } + if issue.Hint == "" { + t.Fatal("expected Hint to be preserved") + } +} + +func TestFormatApplyFailureValidationErrors(t *testing.T) { + result := ApplyResult{ + Status: "error", + TotalStatements: 1633, + TotalRounds: 1, + TotalApplied: 1633, + 
TotalSkipped: 0, + ValidationErrors: []ApplyIssue{ + { + Statement: &ApplyStatement{ + ID: "public/functions/my_function.sql:0", + SQL: "CREATE FUNCTION public.my_function() RETURNS integer LANGUAGE sql AS $$ SELECT missing_column FROM users $$;", + StatementClass: "CREATE_FUNCTION", + }, + Code: "42703", + Message: `column "missing_column" does not exist`, + Position: 8, + Hint: `Perhaps you meant to reference the column "users.missing_column_renamed".`, + }, + }, + } + + formatted := formatApplyFailure(result, false) + assertContains(t, formatted, `pg-delta apply returned status "error"`) + assertContains(t, formatted, `1633/1633 statements applied in 1 round(s)`) + assertContains(t, formatted, "Validation errors (from check_function_bodies=on pass):") + assertContains(t, formatted, "public/functions/my_function.sql:0 [CREATE_FUNCTION]") + assertContains(t, formatted, `column "missing_column" does not exist (SQLSTATE 42703, position 8)`) + assertContains(t, formatted, "Hint: Perhaps you meant to reference the column") +} + +// TestFormatApplyFailureNoDiagnostics exercises the fallback text we render +// when pg-delta returns status=error without any structured issues. The user +// originally reported seeing a bare error message in this situation. +func TestFormatApplyFailureNoDiagnostics(t *testing.T) { + result := ApplyResult{ + Status: "error", + TotalStatements: 1633, + TotalRounds: 1, + TotalApplied: 1633, + TotalSkipped: 0, + } + + formatted := formatApplyFailure(result, false) + assertContains(t, formatted, `pg-delta apply returned status "error"`) + assertContains(t, formatted, "No per-statement diagnostics were reported by pg-delta") + assertContains(t, formatted, "--debug") +} + +// TestApplyResultUnmarshalRealWorldPayload covers the full shape pg-delta emits +// in practice, including diagnostics whose statementId is an object. 
Before we +// made ApplyDiagnosis.UnmarshalJSON defensive, this payload caused the entire +// result parse to fail with "cannot unmarshal object into Go struct field +// ApplyDiagnosis.diagnostics.statementId of type string", which in turn hid +// the real validation error from the user. +func TestApplyResultUnmarshalRealWorldPayload(t *testing.T) { + raw := []byte(`{ + "status": "error", + "totalStatements": 1625, + "totalRounds": 1, + "totalApplied": 1625, + "totalSkipped": 0, + "errors": [], + "stuckStatements": [], + "validationErrors": [ + { + "statement": { + "id": "schemas/public/functions/create_device.sql:0", + "sql": "CREATE FUNCTION public.create_device () RETURNS void LANGUAGE plpgsql AS $function$BEGIN Invalid sql statement; END;$function$;", + "statementClass": "CREATE_FUNCTION" + }, + "code": "42601", + "message": "syntax error at or near \"Invalid\"", + "isDependencyError": false, + "position": 541 + } + ], + "diagnostics": [ + { + "code": "UNRESOLVED_DEPENDENCY", + "message": "No producer found for 'function:pgmq:delete:(unknown,unknown)'.", + "statementId": { + "filePath": "schemas/public/functions/pgmq_delete.sql", + "statementIndex": 0, + "sourceOffset": 0 + }, + "objectRefs": [ + {"kind": "function", "name": "delete", "schema": "pgmq", "signature": "(unknown,unknown)"} + ], + "suggestedFix": "Add the missing statement to your SQL set or declare an explicit pg-topo annotation.", + "details": { + "requiredObjectKey": "function:pgmq:delete:(unknown,unknown)", + "candidateObjectKeys": [] + } + } + ] + }`) + + var result ApplyResult + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("json.Unmarshal() error = %v", err) + } + + if got, want := len(result.ValidationErrors), 1; got != want { + t.Fatalf("len(ValidationErrors) = %d, want %d", got, want) + } + if got, want := result.ValidationErrors[0].Message, `syntax error at or near "Invalid"`; got != want { + t.Fatalf("ValidationErrors[0].Message = %q, want %q", got, want) + } + + if 
got, want := len(result.Diagnostics), 1; got != want { + t.Fatalf("len(Diagnostics) = %d, want %d", got, want) + } + diag := result.Diagnostics[0] + if diag.StatementID == nil { + t.Fatal("expected StatementID to be preserved as a structured location") + } + if got, want := diag.StatementID.FilePath, "schemas/public/functions/pgmq_delete.sql"; got != want { + t.Fatalf("StatementID.FilePath = %q, want %q", got, want) + } + if got, want := diag.Code, "UNRESOLVED_DEPENDENCY"; got != want { + t.Fatalf("Code = %q, want %q", got, want) + } + if diag.SuggestedFix == "" { + t.Fatal("expected SuggestedFix to be preserved") + } + + // Default (non-verbose) output collapses the diagnostics to a single line + // so the user isn't flooded with pg-topo warnings on large schemas. + formatted := formatApplyFailure(result, false) + assertContains(t, formatted, "Validation errors (from check_function_bodies=on pass):") + assertContains(t, formatted, "schemas/public/functions/create_device.sql:0 [CREATE_FUNCTION]") + assertContains(t, formatted, `syntax error at or near "Invalid" (SQLSTATE 42601, position 541)`) + assertContains(t, formatted, "1 pg-topo diagnostic(s) omitted (re-run with --debug to view).") + assertNotContains(t, formatted, "[UNRESOLVED_DEPENDENCY]") + + // Verbose mode (triggered by --debug) expands the diagnostics inline. + verbose := formatApplyFailure(result, true) + assertContains(t, verbose, "Diagnostics:") + assertContains(t, verbose, "[UNRESOLVED_DEPENDENCY]") + assertContains(t, verbose, "schemas/public/functions/pgmq_delete.sql") + assertNotContains(t, verbose, "pg-topo diagnostic(s) omitted") +} + +// TestApplyDiagnosisFallbackStatementIdString covers the defensive path where +// pg-topo emits statementId as a string (older revisions) so the diagnostic +// still survives the parse. 
+func TestApplyDiagnosisFallbackStatementIdString(t *testing.T) { + raw := []byte(`{ + "code": "LEGACY", + "message": "legacy diagnostic shape", + "statementId": "schemas/foo.sql:0" + }`) + + var d ApplyDiagnosis + if err := json.Unmarshal(raw, &d); err != nil { + t.Fatalf("json.Unmarshal() error = %v", err) + } + if d.StatementID == nil { + t.Fatal("expected StatementID to be populated from legacy string shape") + } + if got, want := d.StatementID.FilePath, "schemas/foo.sql:0"; got != want { + t.Fatalf("StatementID.FilePath = %q, want %q", got, want) + } +} + func assertContains(t *testing.T, text, want string) { t.Helper() if !strings.Contains(text, want) { t.Fatalf("expected %q to contain %q", text, want) } } + +func assertNotContains(t *testing.T, text, unwanted string) { + t.Helper() + if strings.Contains(text, unwanted) { + t.Fatalf("expected %q to NOT contain %q", text, unwanted) + } +} diff --git a/internal/pgdelta/templates/pgdelta_declarative_apply.ts b/internal/pgdelta/templates/pgdelta_declarative_apply.ts index 1cf19c29c..a6589bf2b 100644 --- a/internal/pgdelta/templates/pgdelta_declarative_apply.ts +++ b/internal/pgdelta/templates/pgdelta_declarative_apply.ts @@ -36,6 +36,13 @@ try { totalSkipped: apply.totalSkipped ?? 0, errors: apply.errors ?? [], stuckStatements: apply.stuckStatements ?? [], + // validationErrors is populated when the final + // check_function_bodies=on pass catches issues that didn't surface during + // the initial apply rounds (e.g. a function body that references a + // column whose type changed). Without surfacing this field, callers see + // status=error with empty errors/stuckStatements and no actionable info. + validationErrors: apply.validationErrors ?? [], + diagnostics: result.diagnostics ?? [], }; console.log(JSON.stringify(payload)); if (apply.status !== "success") {