diff --git a/pgtype/pgtype.go b/pgtype/pgtype.go
index 253d80966..a9d932524 100644
--- a/pgtype/pgtype.go
+++ b/pgtype/pgtype.go
@@ -1999,6 +1999,9 @@ func (m *Map) Encode(oid uint32, formatCode int16, value any, buf []byte) (newBu
 //
 // This uses the type of v to look up the PostgreSQL OID that v presumably came from. This means v must be registered
 // with m by calling RegisterDefaultPgType.
+//
+// As of Go 1.27, database/sql calls the driver directly to scan columns when using pgx's stdlib package, so this is
+// no longer necessary.
 func (m *Map) SQLScanner(v any) sql.Scanner {
 	if s, ok := v.(sql.Scanner); ok {
 		return s
diff --git a/stdlib/bench_go1.27_test.go b/stdlib/bench_go1.27_test.go
new file mode 100644
index 000000000..9c9efd839
--- /dev/null
+++ b/stdlib/bench_go1.27_test.go
@@ -0,0 +1,29 @@
+//go:build go1.27
+
+package stdlib_test
+
+import (
+	"testing"
+)
+
+// BenchmarkStringArrayScanResultDirect scans a PostgreSQL text[] result directly into a
+// []string via the new driver.RowsColumnScanner interface (no SQLScanner wrapper). This
+// is the new path enabled by Go 1.27.
+func BenchmarkStringArrayScanResultDirect(b *testing.B) {
+	db := openDB(b)
+	defer closeDB(b, db)
+
+	query := benchStringArraySelectSQL()
+	b.ResetTimer()
+
+	for b.Loop() {
+		var result []string
+		err := db.QueryRow(query).Scan(&result)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if len(result) != benchStringArraySize {
+			b.Fatalf("Expected %d, got %d", benchStringArraySize, len(result))
+		}
+	}
+}
diff --git a/stdlib/bench_test.go b/stdlib/bench_test.go
index 460e5e358..5da65cf4c 100644
--- a/stdlib/bench_test.go
+++ b/stdlib/bench_test.go
@@ -1,6 +1,7 @@
 package stdlib_test
 
 import (
+	"context"
 	"database/sql"
 	"fmt"
 	"os"
@@ -8,6 +9,9 @@ import (
 	"strings"
 	"testing"
 	"time"
+
+	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgtype"
 )
 
 func getSelectRowsCounts(b *testing.B) []int64 {
@@ -107,3 +111,103 @@ func BenchmarkSelectRowsScanNull(b *testing.B) {
 		})
 	}
 }
+
+const benchStringArraySize = 10
+
+func benchStringArrayInput() []string {
+	input := make([]string, benchStringArraySize)
+	for i := range input {
+		input[i] = fmt.Sprintf("String %d", i)
+	}
+	return input
+}
+
+func benchStringArraySelectSQL() string {
+	var b strings.Builder
+	b.WriteString("select array[")
+	for i := 0; i < benchStringArraySize; i++ {
+		if i > 0 {
+			b.WriteString(",")
+		}
+		fmt.Fprintf(&b, "'String %d'", i)
+	}
+	b.WriteString("]::text[]")
+	return b.String()
+}
+
+// BenchmarkStringArrayEncodeArgument measures encoding a Go []string into a PostgreSQL
+// array parameter. The encode path is unchanged by RowsColumnScanner, so this number
+// should be the same on Go 1.26 and Go 1.27.
+func BenchmarkStringArrayEncodeArgument(b *testing.B) {
+	db := openDB(b)
+	defer closeDB(b, db)
+
+	input := benchStringArrayInput()
+	b.ResetTimer()
+
+	for b.Loop() {
+		var n int64
+		err := db.QueryRow("select cardinality($1::text[])", input).Scan(&n)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if n != int64(len(input)) {
+			b.Fatalf("Expected %d, got %d", len(input), n)
+		}
+	}
+}
+
+// BenchmarkStringArrayScanResultSQLScanner scans a PostgreSQL text[] result into a
+// []string using the *pgtype.Map.SQLScanner adapter. This is the only way to do this with
+// stdlib before Go 1.27.
+func BenchmarkStringArrayScanResultSQLScanner(b *testing.B) {
+	db := openDB(b)
+	defer closeDB(b, db)
+
+	m := pgtype.NewMap()
+	query := benchStringArraySelectSQL()
+	b.ResetTimer()
+
+	for b.Loop() {
+		var result []string
+		err := db.QueryRow(query).Scan(m.SQLScanner(&result))
+		if err != nil {
+			b.Fatal(err)
+		}
+		if len(result) != benchStringArraySize {
+			b.Fatalf("Expected %d, got %d", benchStringArraySize, len(result))
+		}
+	}
+}
+
+// BenchmarkStringArrayScanResultNativePgx scans a PostgreSQL text[] result into a
+// []string using native pgx (bypassing database/sql entirely). This is the upper-bound
+// performance reference: the stdlib variants pay for the extra database/sql layer.
+func BenchmarkStringArrayScanResultNativePgx(b *testing.B) {
+	ctx := context.Background()
+
+	config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	conn, err := pgx.ConnectConfig(ctx, config)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer conn.Close(ctx)
+
+	query := benchStringArraySelectSQL()
+	b.ResetTimer()
+
+	for b.Loop() {
+		var result []string
+		err := conn.QueryRow(ctx, query).Scan(&result)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if len(result) != benchStringArraySize {
+			b.Fatalf("Expected %d, got %d", benchStringArraySize, len(result))
+		}
+	}
+}
diff --git a/stdlib/sql.go b/stdlib/sql.go
index a37d58c41..454827a8a 100644
--- a/stdlib/sql.go
+++ b/stdlib/sql.go
@@ -57,12 +57,25 @@
 //
 // # PostgreSQL Specific Data Types
 //
-// The pgtype package provides support for PostgreSQL specific types. *pgtype.Map.SQLScanner is an adapter that makes
-// these types usable as a sql.Scanner.
+// As of Go 1.27, database/sql allows drivers to implement their own scanning logic by implementing the
+// driver.RowsColumnScanner interface. This allows PostgreSQL types such as arrays to be scanned directly into Go
+// values such as slices.
+//
+//	var a []int64
+//	err := db.QueryRow("select '{1,2,3}'::bigint[]").Scan(&a)
+//
+// In older versions of Go, *pgtype.Map.SQLScanner can be used as an adapter that makes these types usable as a
+// sql.Scanner.
 //
 //	m := pgtype.NewMap()
 //	var a []int64
 //	err := db.QueryRow("select '{1,2,3}'::bigint[]").Scan(m.SQLScanner(&a))
+//
+// The pgtype package provides support for PostgreSQL specific types. These types can be used directly in Go 1.27 and
+// with *pgtype.Map.SQLScanner in older Go versions.
+//
+//	var r pgtype.Range[pgtype.Int4]
+//	err := db.QueryRow("select int4range(1, 5)").Scan(&r)
 package stdlib
 
 import (
diff --git a/stdlib/sql_go1.27.go b/stdlib/sql_go1.27.go
new file mode 100644
index 000000000..e84595863
--- /dev/null
+++ b/stdlib/sql_go1.27.go
@@ -0,0 +1,62 @@
+//go:build go1.27
+
+package stdlib
+
+import (
+	"database/sql"
+	"io"
+)
+
+// NextRow implements the driver.RowsColumnScanner interface. It advances to the
+// next row of data and returns io.EOF when there are no more rows.
+func (r *Rows) NextRow() error {
+	var more bool
+	if r.skipNext {
+		more = r.skipNextMore
+		r.skipNext = false
+	} else {
+		more = r.rows.Next()
+	}
+
+	if !more {
+		if err := r.rows.Err(); err != nil {
+			return err
+		}
+		return io.EOF
+	}
+
+	return nil
+}
+
+// ScanColumn implements the driver.RowsColumnScanner interface. It uses the
+// pgx type map to scan the raw bytes of the column at the given index directly
+// into dest. This allows database/sql callers to scan into any type supported
+// by pgx, such as Go slices, pgtype.Array, and pgtype.Range.
+//
+// When pgx does not have a scan plan for dest, ScanColumn falls back to
+// sql.ConvertAssign on a driver.Value produced by the column codec. This gives
+// database/sql callers the same conversion semantics they had before Go 1.27
+// (e.g., scanning a PostgreSQL boolean into a *string).
+func (r *Rows) ScanColumn(index int, dest any) error {
+	m := r.conn.conn.TypeMap()
+	fd := r.rows.FieldDescriptions()[index]
+	src := r.rows.RawValues()[index]
+
+	err := m.Scan(fd.DataTypeOID, fd.Format, src, dest)
+	if err == nil {
+		return nil
+	}
+
+	dt, ok := m.TypeForOID(fd.DataTypeOID)
+	if !ok {
+		return err
+	}
+	value, decodeErr := dt.Codec.DecodeDatabaseSQLValue(m, fd.DataTypeOID, fd.Format, src)
+	if decodeErr != nil {
+		return err
+	}
+	if convertErr := sql.ConvertAssign(dest, value); convertErr != nil {
+		return err
+	}
+	return nil
+}
diff --git a/stdlib/sql_go1.27_test.go b/stdlib/sql_go1.27_test.go
new file mode 100644
index 000000000..c7f8cec49
--- /dev/null
+++ b/stdlib/sql_go1.27_test.go
@@ -0,0 +1,218 @@
+//go:build go1.27
+
+package stdlib_test
+
+import (
+	"database/sql"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	pgx "github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgtype"
+	"github.com/jackc/pgx/v5/stdlib"
+)
+
+func testWithKnownOIDQueryExecModes(t *testing.T, f func(t *testing.T, db *sql.DB)) {
+	for _, mode := range []pgx.QueryExecMode{
+		pgx.QueryExecModeCacheStatement,
+		pgx.QueryExecModeCacheDescribe,
+		pgx.QueryExecModeDescribeExec,
+	} {
+		t.Run(mode.String(),
+			func(t *testing.T) {
+				config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
+				require.NoError(t, err)
+
+				config.DefaultQueryExecMode = mode
+				db := stdlib.OpenDB(*config)
+				defer func() {
+					err := db.Close()
+					require.NoError(t, err)
+				}()
+
+				f(t, db)
+
+				ensureDBValid(t, db)
+			},
+		)
+	}
+}
+
+func TestGoArray(t *testing.T) {
+	testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		var names []string
+
+		err := db.QueryRow("select array['John', 'Jane']::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Equal(t, []string{"John", "Jane"}, names)
+
+		var n int
+		err = db.QueryRow("select cardinality($1::text[])", names).Scan(&n)
+		require.NoError(t, err)
+		require.EqualValues(t, 2, n)
+
+		err = db.QueryRow("select null::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Nil(t, names)
+	})
+}
+
+func TestGoArrayOfDriverValuer(t *testing.T) {
+	// Because []sql.NullString is not a registered type on the connection, it will only work with known OIDs.
+	testWithKnownOIDQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		var names []sql.NullString
+
+		err := db.QueryRow("select array['John', null, 'Jane']::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Equal(t, []sql.NullString{{String: "John", Valid: true}, {}, {String: "Jane", Valid: true}}, names)
+
+		var n int
+		err = db.QueryRow("select cardinality($1::text[])", names).Scan(&n)
+		require.NoError(t, err)
+		require.EqualValues(t, 3, n)
+
+		err = db.QueryRow("select null::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Nil(t, names)
+	})
+}
+
+func TestPGTypeFlatArray(t *testing.T) {
+	testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		var names pgtype.FlatArray[string]
+
+		err := db.QueryRow("select array['John', 'Jane']::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Equal(t, pgtype.FlatArray[string]{"John", "Jane"}, names)
+
+		var n int
+		err = db.QueryRow("select cardinality($1::text[])", names).Scan(&n)
+		require.NoError(t, err)
+		require.EqualValues(t, 2, n)
+
+		err = db.QueryRow("select null::text[]").Scan(&names)
+		require.NoError(t, err)
+		require.Nil(t, names)
+	})
+}
+
+func TestPGTypeArray(t *testing.T) {
+	testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		skipCockroachDB(t, db, "Server does not support nested arrays")
+
+		var matrix pgtype.Array[int64]
+
+		err := db.QueryRow("select '{{1,2,3},{4,5,6}}'::bigint[]").Scan(&matrix)
+		require.NoError(t, err)
+		require.Equal(t,
+			pgtype.Array[int64]{
+				Elements: []int64{1, 2, 3, 4, 5, 6},
+				Dims: []pgtype.ArrayDimension{
+					{Length: 2, LowerBound: 1},
+					{Length: 3, LowerBound: 1},
+				},
+				Valid: true},
+			matrix)
+
+		var equal bool
+		err = db.QueryRow("select '{{1,2,3},{4,5,6}}'::bigint[] = $1::bigint[]", matrix).Scan(&equal)
+		require.NoError(t, err)
+		require.Equal(t, true, equal)
+
+		err = db.QueryRow("select null::bigint[]").Scan(&matrix)
+		require.NoError(t, err)
+		assert.Equal(t, pgtype.Array[int64]{Elements: nil, Dims: nil, Valid: false}, matrix)
+	})
+}
+
+func TestConnQueryPGTypeRange(t *testing.T) {
+	testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		skipCockroachDB(t, db, "Server does not support int4range")
+
+		var r pgtype.Range[pgtype.Int4]
+		err := db.QueryRow("select int4range(1, 5)").Scan(&r)
+		require.NoError(t, err)
+		assert.Equal(
+			t,
+			pgtype.Range[pgtype.Int4]{
+				Lower:     pgtype.Int4{Int32: 1, Valid: true},
+				Upper:     pgtype.Int4{Int32: 5, Valid: true},
+				LowerType: pgtype.Inclusive,
+				UpperType: pgtype.Exclusive,
+				Valid:     true,
+			},
+			r)
+
+		var equal bool
+		err = db.QueryRow("select int4range(1, 5) = $1::int4range", r).Scan(&equal)
+		require.NoError(t, err)
+		require.Equal(t, true, equal)
+
+		err = db.QueryRow("select null::int4range").Scan(&r)
+		require.NoError(t, err)
+		assert.Equal(t, pgtype.Range[pgtype.Int4]{}, r)
+	})
+}
+
+// TestScanFallbackToConvertAssign covers cases where pgx has no scan plan for the
+// destination type (e.g., scanning a binary boolean into a *string). The driver should
+// fall back to sql.ConvertAssign so that callers get the same conversion semantics they
+// had before Go 1.27.
+//
+// The known-OID modes use binary format for bool, where pgx would otherwise fail with
+// "cannot scan bool (OID 16) in binary format into *string". With the ConvertAssign
+// fallback, the bool decodes to a driver.Value and ConvertAssign produces "true"/"false".
+func TestScanFallbackToConvertAssign(t *testing.T) {
+	testWithKnownOIDQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		var s string
+		err := db.QueryRow("select true").Scan(&s)
+		require.NoError(t, err)
+		require.Equal(t, "true", s)
+
+		err = db.QueryRow("select false").Scan(&s)
+		require.NoError(t, err)
+		require.Equal(t, "false", s)
+	})
+}
+
+func TestConnQueryPGTypeMultirange(t *testing.T) {
+	testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
+		skipCockroachDB(t, db, "Server does not support int4range")
+		skipPostgreSQLVersionLessThan(t, db, 14)
+
+		var r pgtype.Multirange[pgtype.Range[pgtype.Int4]]
+		err := db.QueryRow("select int4multirange(int4range(1, 5), int4range(7,9))").Scan(&r)
+		require.NoError(t, err)
+		assert.Equal(
+			t,
+			pgtype.Multirange[pgtype.Range[pgtype.Int4]]{
+				{
+					Lower:     pgtype.Int4{Int32: 1, Valid: true},
+					Upper:     pgtype.Int4{Int32: 5, Valid: true},
+					LowerType: pgtype.Inclusive,
+					UpperType: pgtype.Exclusive,
+					Valid:     true,
+				},
+				{
+					Lower:     pgtype.Int4{Int32: 7, Valid: true},
+					Upper:     pgtype.Int4{Int32: 9, Valid: true},
+					LowerType: pgtype.Inclusive,
+					UpperType: pgtype.Exclusive,
+					Valid:     true,
+				},
+			},
+			r)
+
+		var equal bool
+		err = db.QueryRow("select int4multirange(int4range(1, 5), int4range(7,9)) = $1::int4multirange", r).Scan(&equal)
+		require.NoError(t, err)
+		require.Equal(t, true, equal)
+
+		err = db.QueryRow("select null::int4multirange").Scan(&r)
+		require.NoError(t, err)
+		require.Nil(t, r)
+	})
+}