diff --git a/.gitignore b/.gitignore index 90e1de5..7fc2429 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ __debug* .vscode/*.log *.iml .idea/ +bin/ diff --git a/Makefile b/Makefile index eab5514..f79acbc 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,9 @@ VGO=go GOFILES := $(shell find pkg -name '*.go' -print) GOBIN := $(shell $(VGO) env GOPATH)/bin +LOCALBIN := $(shell pwd)/bin LINT := $(GOBIN)/golangci-lint -MOCKERY := $(GOBIN)/mockery +MOCKERY := $(LOCALBIN)/mockery # Expect that FireFly compiles with CGO disabled CGO_ENABLED=0 @@ -19,7 +20,7 @@ coverage: test coverage.html lint: ${LINT} GOGC=20 $(LINT) run -v --timeout 5m --fast --allow-parallel-runners ${MOCKERY}: - $(VGO) install github.com/vektra/mockery/v2@latest + GOBIN=$(LOCALBIN) $(VGO) install github.com/vektra/mockery/v2@latest ${LINT}: $(VGO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.8 dbmigrate: @@ -37,6 +38,7 @@ $(eval $(call makemock, pkg/httpserver, GoHTTPServer, httpserve $(eval $(call makemock, pkg/auth, Plugin, authmocks)) $(eval $(call makemock, pkg/wsserver, Protocol, wsservermocks)) $(eval $(call makemock, pkg/wsserver, WebSocketServer, wsservermocks)) +$(eval $(call makemock, pkg/metric, MetricsRegistry, metricmocks)) $(eval $(call makemock, pkg/dbsql, CRUD, crudmocks)) firefly-common: ${GOFILES} diff --git a/mocks/authmocks/plugin.go b/mocks/authmocks/plugin.go index 0ca63b3..5fb9dea 100644 --- a/mocks/authmocks/plugin.go +++ b/mocks/authmocks/plugin.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. package authmocks diff --git a/mocks/crudmocks/crud.go b/mocks/crudmocks/crud.go index ca8768d..acd259d 100644 --- a/mocks/crudmocks/crud.go +++ b/mocks/crudmocks/crud.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. 
package crudmocks diff --git a/mocks/dbmigratemocks/driver.go b/mocks/dbmigratemocks/driver.go index 50b5a6d..d83f50e 100644 --- a/mocks/dbmigratemocks/driver.go +++ b/mocks/dbmigratemocks/driver.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. package dbmigratemocks diff --git a/mocks/httpservermocks/go_http_server.go b/mocks/httpservermocks/go_http_server.go index 418d166..1c69643 100644 --- a/mocks/httpservermocks/go_http_server.go +++ b/mocks/httpservermocks/go_http_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. package httpservermocks diff --git a/mocks/metricmocks/metrics_registry.go b/mocks/metricmocks/metrics_registry.go new file mode 100644 index 0000000..e652530 --- /dev/null +++ b/mocks/metricmocks/metrics_registry.go @@ -0,0 +1,167 @@ +// Code generated by mockery v2.53.6. DO NOT EDIT. + +package metricmocks + +import ( + context "context" + http "net/http" + + metric "github.com/hyperledger/firefly-common/pkg/metric" + mock "github.com/stretchr/testify/mock" + + prometheus "github.com/prometheus/client_golang/prometheus" + + promhttp "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// MetricsRegistry is an autogenerated mock type for the MetricsRegistry type +type MetricsRegistry struct { + mock.Mock +} + +// GetGatherer provides a mock function with no fields +func (_m *MetricsRegistry) GetGatherer() prometheus.Gatherer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetGatherer") + } + + var r0 prometheus.Gatherer + if rf, ok := ret.Get(0).(func() prometheus.Gatherer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(prometheus.Gatherer) + } + } + + return r0 +} + +// GetHTTPMetricsInstrumentationsMiddlewareForSubsystem provides a mock function with given fields: ctx, subsystem +func (_m *MetricsRegistry) 
GetHTTPMetricsInstrumentationsMiddlewareForSubsystem(ctx context.Context, subsystem string) (func(http.Handler) http.Handler, error) { + ret := _m.Called(ctx, subsystem) + + if len(ret) == 0 { + panic("no return value specified for GetHTTPMetricsInstrumentationsMiddlewareForSubsystem") + } + + var r0 func(http.Handler) http.Handler + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (func(http.Handler) http.Handler, error)); ok { + return rf(ctx, subsystem) + } + if rf, ok := ret.Get(0).(func(context.Context, string) func(http.Handler) http.Handler); ok { + r0 = rf(ctx, subsystem) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func(http.Handler) http.Handler) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, subsystem) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HTTPHandler provides a mock function with given fields: ctx, handlerOpts +func (_m *MetricsRegistry) HTTPHandler(ctx context.Context, handlerOpts promhttp.HandlerOpts) (http.Handler, error) { + ret := _m.Called(ctx, handlerOpts) + + if len(ret) == 0 { + panic("no return value specified for HTTPHandler") + } + + var r0 http.Handler + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, promhttp.HandlerOpts) (http.Handler, error)); ok { + return rf(ctx, handlerOpts) + } + if rf, ok := ret.Get(0).(func(context.Context, promhttp.HandlerOpts) http.Handler); ok { + r0 = rf(ctx, handlerOpts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(http.Handler) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, promhttp.HandlerOpts) error); ok { + r1 = rf(ctx, handlerOpts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MustRegisterCollector provides a mock function with given fields: collector +func (_m *MetricsRegistry) MustRegisterCollector(collector prometheus.Collector) { + _m.Called(collector) +} + +// NewHTTPMetricsInstrumentationsForSubsystem provides a mock function with given fields: 
ctx, subsystem, useRouteTemplate, reqDurationBuckets, labels +func (_m *MetricsRegistry) NewHTTPMetricsInstrumentationsForSubsystem(ctx context.Context, subsystem string, useRouteTemplate bool, reqDurationBuckets []float64, labels map[string]string) error { + ret := _m.Called(ctx, subsystem, useRouteTemplate, reqDurationBuckets, labels) + + if len(ret) == 0 { + panic("no return value specified for NewHTTPMetricsInstrumentationsForSubsystem") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool, []float64, map[string]string) error); ok { + r0 = rf(ctx, subsystem, useRouteTemplate, reqDurationBuckets, labels) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewMetricsManagerForSubsystem provides a mock function with given fields: ctx, subsystem +func (_m *MetricsRegistry) NewMetricsManagerForSubsystem(ctx context.Context, subsystem string) (metric.MetricsManager, error) { + ret := _m.Called(ctx, subsystem) + + if len(ret) == 0 { + panic("no return value specified for NewMetricsManagerForSubsystem") + } + + var r0 metric.MetricsManager + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (metric.MetricsManager, error)); ok { + return rf(ctx, subsystem) + } + if rf, ok := ret.Get(0).(func(context.Context, string) metric.MetricsManager); ok { + r0 = rf(ctx, subsystem) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(metric.MetricsManager) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, subsystem) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMetricsRegistry creates a new instance of MetricsRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMetricsRegistry(t interface { + mock.TestingT + Cleanup(func()) +}) *MetricsRegistry { + mock := &MetricsRegistry{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/mocks/wsservermocks/protocol.go b/mocks/wsservermocks/protocol.go index 9f6e82b..14d4654 100644 --- a/mocks/wsservermocks/protocol.go +++ b/mocks/wsservermocks/protocol.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. package wsservermocks diff --git a/mocks/wsservermocks/web_socket_server.go b/mocks/wsservermocks/web_socket_server.go index 23a534d..e5750ba 100644 --- a/mocks/wsservermocks/web_socket_server.go +++ b/mocks/wsservermocks/web_socket_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.53.4. DO NOT EDIT. +// Code generated by mockery v2.53.6. DO NOT EDIT. package wsservermocks diff --git a/pkg/dbsql/database.go b/pkg/dbsql/database.go index 616af34..0c19d0b 100644 --- a/pkg/dbsql/database.go +++ b/pkg/dbsql/database.go @@ -1,4 +1,4 @@ -// Copyright © 2024 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -29,6 +29,7 @@ import ( "github.com/hyperledger/firefly-common/pkg/fftypes" "github.com/hyperledger/firefly-common/pkg/i18n" "github.com/hyperledger/firefly-common/pkg/log" + "github.com/prometheus/client_golang/prometheus/collectors" "github.com/golang-migrate/migrate/v4/database" // Import migrate file source @@ -43,6 +44,7 @@ type Database struct { features SQLFeatures connLimit int sequenceColumn string + dbName string } // PreCommitAccumulator is a structure that can accumulate state during @@ -57,6 +59,7 @@ type TXWrapper struct { sqlTX *sql.Tx preCommitAccumulator PreCommitAccumulator postCommit []func() + startTime time.Time } func (tx *TXWrapper) AddPostCommitHook(fn func()) { @@ -84,6 +87,12 @@ func (s *Database) Init(ctx context.Context, provider Provider, config config.Se return i18n.NewError(ctx, i18n.MsgMissingConfig, "url", fmt.Sprintf("database.%s", s.provider.Name())) } + dbName, err := provider.GetDatabaseName(config.GetString(SQLConfDatasourceURL)) + if err != nil { + return i18n.WrapError(ctx, err, i18n.MsgDBInitFailed) + } + s.dbName = dbName + if s.db, err = provider.Open(config.GetString(SQLConfDatasourceURL)); err != nil { return i18n.WrapError(ctx, err, i18n.MsgDBInitFailed) } @@ -106,6 +115,10 @@ func (s *Database) Init(ctx context.Context, provider Provider, config config.Se } } + if metricsRegistry != nil { // we have enabled metrics globally, so register the database stats collector for this database + metricsRegistry.MustRegisterCollector(collectors.NewDBStatsCollector(s.db, s.dbName)) + } + return nil } @@ -141,6 +154,7 @@ func (s *Database) RunAsGroup(ctx context.Context, fn func(ctx context.Context) } func (s *Database) applyDBMigrations(ctx context.Context, config config.Section, provider Provider) (err error) { + before := time.Now() var driver database.Driver providerClosable, isClosable := provider.(ProviderCloseableMigrationDriver) if isClosable { @@ -167,6 +181,7 @@ func (s 
*Database) applyDBMigrations(ctx context.Context, config config.Section, if err != nil && err != migrate.ErrNoChange { return i18n.WrapError(ctx, err, i18n.MsgDBMigrationFailed) } + s.observeMigration(ctx, time.Since(before)) return nil } @@ -182,7 +197,6 @@ func GetTXFromContext(ctx context.Context) *TXWrapper { } func (s *Database) BeginOrUseTx(ctx context.Context) (ctx1 context.Context, tx *TXWrapper, autoCommit bool, err error) { - tx = GetTXFromContext(ctx) if tx != nil { // There is s transaction on the context already. @@ -199,7 +213,8 @@ func (s *Database) BeginOrUseTx(ctx context.Context) (ctx1 context.Context, tx * return ctx1, nil, false, i18n.WrapError(ctx1, err, i18n.MsgDBBeginFailed) } tx = &TXWrapper{ - sqlTX: sqlTX, + sqlTX: sqlTX, + startTime: before, } ctx1 = context.WithValue(ctx1, txContextKey{}, tx) l.Debugf("SQL<- begin (%.2fms)", floatMillisSince(before)) @@ -231,10 +246,13 @@ func (s *Database) RunAsQueryTx(ctx context.Context, table string, tx *TXWrapper } else { rows, err = s.db.QueryContext(ctx, sqlQuery, args...) } + elapsed := time.Since(before) if err != nil { l.Errorf(`SQL query failed: %s sql=[ %s ]`, err, sqlQuery) + s.incOpError(ctx, table, "query") return nil, tx, i18n.WrapError(ctx, err, i18n.MsgDBQueryFailed) } + s.observeOp(ctx, table, "query", elapsed) l.Debugf(`SQL<- query %s (%.2fms)`, table, floatMillisSince(before)) return rows, tx, nil } @@ -272,16 +290,20 @@ func (s *Database) CountQuery(ctx context.Context, table string, tx *TXWrapper, } else { rows, err = s.db.QueryContext(ctx, sqlQuery, args...) 
} + elapsed := time.Since(before) if err != nil { l.Errorf(`SQL count query failed: %s sql=[ %s ]`, err, sqlQuery) + s.incOpError(ctx, table, "count") return count, i18n.WrapError(ctx, err, i18n.MsgDBQueryFailed) } defer rows.Close() if rows.Next() { if err = rows.Scan(&count); err != nil { + s.incOpError(ctx, table, "count") return count, i18n.WrapError(ctx, err, i18n.MsgDBReadErr, table) } } + s.observeOp(ctx, table, "count", elapsed) l.Debugf(`SQL<- count query %s: %d (%.2fms)`, table, count, floatMillisSince(before)) return count, nil } @@ -337,6 +359,7 @@ func (s *Database) InsertTxRows(ctx context.Context, table string, tx *TXWrapper if requestConflictEmptyResult && noInsert { l.Infof(`SQL insert returning partial result: %s`, err) } else { + s.incOpError(ctx, table, "insert") l.Errorf(`SQL insert failed (conflictEmptyRequested=%t) sql=[ %s ]: %s`, requestConflictEmptyResult, sqlQuery, err) } return i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) @@ -347,11 +370,13 @@ func (s *Database) InsertTxRows(ctx context.Context, table string, tx *TXWrapper } res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { + s.incOpError(ctx, table, "insert") l.Errorf(`SQL insert failed: %s sql=[ %s ]: %s`, err, sqlQuery, err) return i18n.WrapError(ctx, err, i18n.MsgDBInsertFailed) } sequences[0], _ = res.LastInsertId() } + s.observeOp(ctx, table, "insert", time.Since(before)) l.Debugf(`SQL<- insert %s sequences=%v (%.2fms)`, table, sequences, floatMillisSince(before)) if postCommit != nil { @@ -370,9 +395,11 @@ func (s *Database) DeleteTx(ctx context.Context, table string, tx *TXWrapper, q l.Tracef(`SQL-> delete query: %s args: %+v`, sqlQuery, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) 
if err != nil { + s.incOpError(ctx, table, "delete") l.Errorf(`SQL delete failed: %s sql=[ %s ]: %s`, err, sqlQuery, err) return i18n.WrapError(ctx, err, i18n.MsgDBDeleteFailed) } + s.observeOp(ctx, table, "delete", time.Since(before)) ra, _ := res.RowsAffected() l.Debugf(`SQL<- delete %s affected=%d (%.2fms)`, table, ra, floatMillisSince(before)) if ra < 1 { @@ -391,9 +418,11 @@ func (s *Database) ExecTx(ctx context.Context, table string, tx *TXWrapper, sqlQ l.Tracef(`SQL-> exec: %s args: %+v`, sqlQuery, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { + s.incOpError(ctx, table, "exec") l.Errorf(`SQL exec: %s sql=[ %s ]: %s`, err, sqlQuery, err) return nil, i18n.WrapError(ctx, err, i18n.MsgDBExecFailed) } + s.observeOp(ctx, table, "exec", time.Since(before)) ra, _ := res.RowsAffected() l.Debugf(`SQL<- exec: %s affected=%d (%.2fms)`, table, ra, floatMillisSince(before)) return res, nil @@ -409,9 +438,11 @@ func (s *Database) UpdateTx(ctx context.Context, table string, tx *TXWrapper, q l.Tracef(`SQL-> update query: %s (args: %+v)`, sqlQuery, args) res, err := tx.sqlTX.ExecContext(ctx, sqlQuery, args...) if err != nil { + s.incOpError(ctx, table, "update") l.Errorf(`SQL update failed: %s sql=[ %s ]`, err, sqlQuery) return -1, i18n.WrapError(ctx, err, i18n.MsgDBUpdateFailed) } + s.observeOp(ctx, table, "update", time.Since(before)) ra, _ := res.RowsAffected() l.Debugf(`SQL<- update %s affected=%d (%.2fms)`, table, ra, floatMillisSince(before)) @@ -447,9 +478,9 @@ func (s *Database) RollbackTx(ctx context.Context, tx *TXWrapper, autoCommit boo err := tx.sqlTX.Rollback() if err == nil { + s.observeTx(ctx, time.Since(tx.startTime), "rollback") log.L(ctx).Warnf("SQL! 
transaction rollback") - } - if err != nil && err != sql.ErrTxDone { + } else if err != sql.ErrTxDone { log.L(ctx).Errorf(`SQL rollback failed: %s`, err) } } @@ -471,14 +502,15 @@ func (s *Database) CommitTx(ctx context.Context, tx *TXWrapper, autoCommit bool) } } - before := time.Now() + commitBefore := time.Now() l.Tracef(`SQL-> commit`) err := tx.sqlTX.Commit() if err != nil { l.Errorf(`SQL commit failed: %s`, err) return i18n.WrapError(ctx, err, i18n.MsgDBCommitFailed) } - l.Debugf(`SQL<- commit (%.2fms)`, floatMillisSince(before)) + s.observeTx(ctx, time.Since(tx.startTime), "commit") + l.Debugf(`SQL<- commit (%.2fms)`, floatMillisSince(commitBefore)) // Emit any post commit events (these aren't currently allowed to cause errors) for i, pce := range tx.postCommit { diff --git a/pkg/dbsql/database_test.go b/pkg/dbsql/database_test.go index 49ac5fe..8a42922 100644 --- a/pkg/dbsql/database_test.go +++ b/pkg/dbsql/database_test.go @@ -25,9 +25,11 @@ import ( "github.com/DATA-DOG/go-sqlmock" sq "github.com/Masterminds/squirrel" "github.com/hyperledger/firefly-common/pkg/ffapi" + "github.com/hyperledger/firefly-common/pkg/metric" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" ) type testPreCommitAccumulator struct { @@ -78,6 +80,13 @@ func TestInitDatabaseFeatures(t *testing.T) { assert.Equal(t, false, s.Features().MultiRowInsert) } +func TestInitDatabaseGetDatabaseNameFailed(t *testing.T) { + mp := NewMockProvider() + mp.GetDatabaseNameError = fmt.Errorf("pop") + err := mp.Database.Init(context.Background(), mp, mp.config) + assert.Regexp(t, "FF00173.*pop", err) +} + func TestInitDatabaseOpenFailed(t *testing.T) { mp := NewMockProvider() mp.OpenError = fmt.Errorf("pop") @@ -93,8 +102,13 @@ func TestInitDatabaseMigrationOpenFailed(t *testing.T) { assert.Regexp(t, "FF00184.*pop", err) } -func TestInitDatabaseMigrationFailed(t *testing.T) { +func TestInitDatabaseMigrationOk(t 
*testing.T) { mp := NewMockProvider() + defer mp.Close() + mr := metric.NewPrometheusMetricsRegistry("ut") + err := EnableDBMetrics(context.Background(), mr) + require.NoError(t, err) + defer func() { metricsRegistry = nil; metricsManager = nil }() mp.mmg.On("Lock").Return(nil) mp.mmg.On("Version").Return(-1, false, nil) mp.mmg.On("SetVersion", 1, true).Return(nil) @@ -107,11 +121,11 @@ func TestInitDatabaseMigrationFailed(t *testing.T) { mp.mmg.On("Unlock").Return(nil) mp.config.Set(SQLConfMigrationsAuto, true) mp.config.Set(SQLConfMigrationsDirectory, "../../test/dbmigrations") - err := mp.Database.Init(context.Background(), mp, mp.config) + err = mp.Database.Init(context.Background(), mp, mp.config) assert.NoError(t, err) } -func TestInitDatabaseMigrationOk(t *testing.T) { +func TestInitDatabaseMigrationFailed(t *testing.T) { mp := NewMockProvider() defer mp.Close() mp.mmg.On("Lock").Return(nil) diff --git a/pkg/dbsql/metrics.go b/pkg/dbsql/metrics.go new file mode 100644 index 0000000..5a5415e --- /dev/null +++ b/pkg/dbsql/metrics.go @@ -0,0 +1,119 @@ +// Copyright © 2026 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbsql + +import ( + "context" + "time" + + "github.com/hyperledger/firefly-common/pkg/log" + "github.com/hyperledger/firefly-common/pkg/metric" +) + +const ( + subsystem = "dbsql" + + metricOpDuration = "operation_duration_seconds" + metricTxDuration = "tx_duration_seconds" + metricMigrationDuration = "migration_duration_seconds" + metricTxCommit = "tx_commit_total" + metricTxRollback = "tx_rollback_total" + metricErrors = "errors_total" +) + +var ( + metricsRegistry metric.MetricsRegistry + metricsManager metric.MetricsManager + + // 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 2.5s, 5s + dbOperationBuckets = []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5} + // 10ms, 50ms, 100ms, 500ms, 1s, 2.5s, 5s, 10s, 30s, 60s + dbTxBuckets = []float64{0.01, 0.05, 0.1, 0.5, 1, 2.5, 5, 10, 30, 60} +) + +// EnableDBMetrics registers prometheus metrics for database operations. +// Must be called before any Database.Init to ensure metrics are wired up. +func EnableDBMetrics(ctx context.Context, _metricsRegistry metric.MetricsRegistry) (err error) { + if metricsRegistry != nil { + log.L(ctx).Warn("DB metrics are already enabled, skipping") + return nil + } + metricsRegistry = _metricsRegistry + + metricsManager, err = metricsRegistry.NewMetricsManagerForSubsystem(ctx, subsystem) + if err != nil { + metricsRegistry = nil + metricsManager = nil + return err + } + + metricsManager.NewHistogramMetricWithLabels(ctx, metricOpDuration, + "Latency of individual DB operations", dbOperationBuckets, + []string{"db", "table", "operation"}, false) + metricsManager.NewHistogramMetricWithLabels(ctx, metricTxDuration, + "Duration of DB transactions from begin to commit/rollback", dbTxBuckets, + []string{"db", "outcome"}, false) + metricsManager.NewSummaryMetricWithLabels(ctx, metricMigrationDuration, + "Duration of DB schema migrations", []string{"db"}, false) + metricsManager.NewCounterMetricWithLabels(ctx, metricTxCommit, + "Total committed DB transactions", 
[]string{"db"}, false) + metricsManager.NewCounterMetricWithLabels(ctx, metricTxRollback, + "Total rolled-back DB transactions", []string{"db"}, false) + metricsManager.NewCounterMetricWithLabels(ctx, metricErrors, + "Total DB operation errors", []string{"db", "table", "operation"}, false) + + return nil +} + +func (s *Database) observeOp(ctx context.Context, table, operation string, elapsed time.Duration) { + if metricsManager == nil { + return + } + metricsManager.ObserveHistogramMetricWithLabels(ctx, metricOpDuration, elapsed.Seconds(), + map[string]string{"db": s.dbName, "table": table, "operation": operation}, nil) +} + +func (s *Database) incOpError(ctx context.Context, table, operation string) { + if metricsManager == nil { + return + } + metricsManager.IncCounterMetricWithLabels(ctx, metricErrors, + map[string]string{"db": s.dbName, "table": table, "operation": operation}, nil) +} + +func (s *Database) observeTx(ctx context.Context, elapsed time.Duration, outcome string) { + if metricsManager == nil { + return + } + metricsManager.ObserveHistogramMetricWithLabels(ctx, metricTxDuration, elapsed.Seconds(), + map[string]string{"db": s.dbName, "outcome": outcome}, nil) + if outcome == "commit" { + metricsManager.IncCounterMetricWithLabels(ctx, metricTxCommit, + map[string]string{"db": s.dbName}, nil) + } else { + metricsManager.IncCounterMetricWithLabels(ctx, metricTxRollback, + map[string]string{"db": s.dbName}, nil) + } +} + +func (s *Database) observeMigration(ctx context.Context, elapsed time.Duration) { + if metricsManager == nil { + return + } + metricsManager.ObserveSummaryMetricWithLabels(ctx, metricMigrationDuration, elapsed.Seconds(), + map[string]string{"db": s.dbName}, nil) +} diff --git a/pkg/dbsql/metrics_test.go b/pkg/dbsql/metrics_test.go new file mode 100644 index 0000000..a1a5773 --- /dev/null +++ b/pkg/dbsql/metrics_test.go @@ -0,0 +1,91 @@ +// Copyright © 2026 Kaleido, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbsql + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/hyperledger/firefly-common/mocks/metricmocks" + "github.com/hyperledger/firefly-common/pkg/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestEnableDBMetrics(t *testing.T) { + defer func() { metricsRegistry = nil; metricsManager = nil }() + + ctx := context.Background() + mr := metric.NewPrometheusMetricsRegistry("ut") + + err := EnableDBMetrics(ctx, mr) + assert.NoError(t, err) + + // second call is a no-op + err = EnableDBMetrics(ctx, mr) + assert.NoError(t, err) + + // test error from NewMetricsManagerForSubsystem + metricsRegistry = nil + metricsManager = nil + mmr := metricmocks.NewMetricsRegistry(t) + mmr.On("NewMetricsManagerForSubsystem", mock.Anything, mock.Anything).Return(nil, errors.New("pop")) + err = EnableDBMetrics(ctx, mmr) + assert.Error(t, err) + assert.Regexp(t, "pop", err) + assert.Nil(t, metricsRegistry) + assert.Nil(t, metricsManager) +} + +func TestObserveOpNoManager(t *testing.T) { + defer func() { metricsRegistry = nil; metricsManager = nil }() + + db := &Database{dbName: "testdb"} + // should not panic when metricsManager is nil + db.observeOp(context.Background(), "things", "query", time.Millisecond) + db.incOpError(context.Background(), "things", "query") + db.observeTx(context.Background(), 
time.Millisecond, "commit") + db.observeMigration(context.Background(), time.Millisecond) +} + +func TestObserveOpWithManager(t *testing.T) { + defer func() { metricsRegistry = nil; metricsManager = nil }() + + ctx := context.Background() + mr := metric.NewPrometheusMetricsRegistry("ut_observe") + err := EnableDBMetrics(ctx, mr) + assert.NoError(t, err) + + db := &Database{dbName: "testdb"} + + db.observeOp(ctx, "things", "query", 10*time.Millisecond) + db.observeOp(ctx, "things", "insert", 5*time.Millisecond) + db.observeOp(ctx, "things", "update", 3*time.Millisecond) + db.observeOp(ctx, "things", "delete", 2*time.Millisecond) + db.observeOp(ctx, "things", "count", 1*time.Millisecond) + db.observeOp(ctx, "things", "exec", 1*time.Millisecond) + + db.incOpError(ctx, "things", "query") + db.incOpError(ctx, "things", "insert") + + db.observeTx(ctx, 50*time.Millisecond, "commit") + db.observeTx(ctx, 20*time.Millisecond, "rollback") + + db.observeMigration(ctx, 100*time.Millisecond) +} diff --git a/pkg/dbsql/mock_provider.go b/pkg/dbsql/mock_provider.go index 21aa481..52ef5f8 100644 --- a/pkg/dbsql/mock_provider.go +++ b/pkg/dbsql/mock_provider.go @@ -1,4 +1,4 @@ -// Copyright © 2024 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -47,6 +47,7 @@ type MockProviderConnScoped struct { type MockProviderConfig struct { FakePSQLInsert bool OpenError error + GetDatabaseNameError error GetMigrationDriverError error IndividualSort bool MultiRowInsert bool @@ -116,3 +117,7 @@ func (mp *MockProvider) GetMigrationDriver(_ *sql.DB) (migratedb.Driver, error) func (mp *MockProviderConnScoped) GetMigrationDriverConn(_ *sql.DB) (migratedb.Driver, error) { return mp.mmg, mp.GetMigrationDriverError } + +func (mp *MockProvider) GetDatabaseName(_ string) (string, error) { + return "mockdb", mp.GetDatabaseNameError +} diff --git a/pkg/dbsql/postgres_helpers.go b/pkg/dbsql/postgres_helpers.go index e9085ee..5e1b382 100644 --- a/pkg/dbsql/postgres_helpers.go +++ b/pkg/dbsql/postgres_helpers.go @@ -1,4 +1,4 @@ -// Copyright © 2024 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -20,6 +20,7 @@ import ( "context" "database/sql/driver" "fmt" + "net/url" "strings" sq "github.com/Masterminds/squirrel" @@ -45,3 +46,23 @@ func BuildPostgreSQLOptimizedUpsert(ctx context.Context, table string, idColumn return insert.Suffix(fmt.Sprintf("ON CONFLICT (%s) DO UPDATE", idColumn)).SuffixExpr(sq.Expr(updateSQL, updateValues...)).Suffix("RETURNING " + returnCol), nil } + +// GetPostgreSQLDatabaseName extracts the database name from a PostgreSQL connection URI +// (e.g. postgres://user:pass@host:port/dbname?params). Returns an error if the URL +// has no scheme or no explicit database name in the path. +// +// libpq defaults dbname to the connecting OS username when omitted, but this function +// has no way to determine that, so it requires an explicit dbname in the URL. 
+func GetPostgreSQLDatabaseName(pgURL string) (string, error) { + + parsed, err := url.Parse(pgURL) + if err != nil || parsed.Scheme == "" { + return "", i18n.NewError(context.Background(), i18n.MsgDBFailedToExtractDBName) + } + + dbName := strings.TrimPrefix(parsed.Path, "/") + if dbName == "" { + return "", i18n.NewError(context.Background(), i18n.MsgDBFailedToExtractDBName) + } + return dbName, nil +} diff --git a/pkg/dbsql/postgres_helpers_test.go b/pkg/dbsql/postgres_helpers_test.go index 0e84c15..64da8a5 100644 --- a/pkg/dbsql/postgres_helpers_test.go +++ b/pkg/dbsql/postgres_helpers_test.go @@ -1,4 +1,4 @@ -// Copyright © 2024 Kaleido, Inc. +// Copyright © 2024-2026 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -60,3 +60,47 @@ func TestBuildPostgreSQLOptimizedUpsertFail(t *testing.T) { assert.Regexp(t, "FF00247", err) } + +func TestGetPostgreSQLDatabaseName(t *testing.T) { + + dbName, err := GetPostgreSQLDatabaseName("postgres://user:password@host:5432/dbname") + assert.NoError(t, err) + assert.Equal(t, "dbname", dbName) + + dbName, err = GetPostgreSQLDatabaseName("postgres://user:password@host:5432/dbnameWithParam?param=value") + assert.NoError(t, err) + assert.Equal(t, "dbnameWithParam", dbName) + + dbName, err = GetPostgreSQLDatabaseName("postgres://user:password@host:5432/dbnameWithParams?param=value&param=value") + assert.NoError(t, err) + assert.Equal(t, "dbnameWithParams", dbName) + + // postgresql:// scheme variant + dbName, err = GetPostgreSQLDatabaseName("postgresql://user:password@host:5432/mydb") + assert.NoError(t, err) + assert.Equal(t, "mydb", dbName) + + // Password with special characters (percent-encoded per RFC 3986) + dbName, err = GetPostgreSQLDatabaseName("postgres://user:p%40ss%2Fword@host:5432/mydb") + assert.NoError(t, err) + assert.Equal(t, "mydb", dbName) + + // No dbname in scheme URL — must error, not return host/user/password + _, err = GetPostgreSQLDatabaseName("postgres://user:password@host:5432") + 
assert.Regexp(t, "FF00260", err) + + _, err = GetPostgreSQLDatabaseName("postgres://user:password@host:5432/") + assert.Regexp(t, "FF00260", err) + + _, err = GetPostgreSQLDatabaseName("postgres://user:password@host:5432?sslmode=disable") + assert.Regexp(t, "FF00260", err) + + // No scheme — not a valid libpq URI + _, err = GetPostgreSQLDatabaseName("host:5432/dbname") + assert.Regexp(t, "FF00260", err) + + // Empty string + _, err = GetPostgreSQLDatabaseName("") + assert.Regexp(t, "FF00260", err) + +} diff --git a/pkg/dbsql/provider.go b/pkg/dbsql/provider.go index 47926d2..7c4f9aa 100644 --- a/pkg/dbsql/provider.go +++ b/pkg/dbsql/provider.go @@ -1,4 +1,4 @@ -// Copyright © 2025 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. // // SPDX-License-Identifier: Apache-2.0 // @@ -67,6 +67,9 @@ type Provider interface { // ApplyInsertQueryCustomizations updates the INSERT query for returning the Sequence, and returns whether it needs to be run as a query to return the Sequence field ApplyInsertQueryCustomizations(insert sq.InsertBuilder, requestConflictEmptyResult bool) (updatedInsert sq.InsertBuilder, runAsQuery bool) + + // GetDatabaseName returns the name of the database from the URL + GetDatabaseName(url string) (string, error) } // Implementing this interface allows cleanup of the connection used during migration diff --git a/pkg/dbsql/provider_sqlitego.go b/pkg/dbsql/provider_sqlitego.go index 4f91381..872a276 100644 --- a/pkg/dbsql/provider_sqlitego.go +++ b/pkg/dbsql/provider_sqlitego.go @@ -1,4 +1,4 @@ -// Copyright © 2024 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -19,6 +19,8 @@ package dbsql import ( "context" "database/sql" + "path/filepath" + "strings" sq "github.com/Masterminds/squirrel" migratedb "github.com/golang-migrate/migrate/v4/database" @@ -75,3 +77,18 @@ func (p *sqLiteProvider) Open(url string) (*sql.DB, error) { func (p *sqLiteProvider) GetMigrationDriver(db *sql.DB) (migratedb.Driver, error) { return sqlite3.WithInstance(db, &sqlite3.Config{}) } + +func (p *sqLiteProvider) GetDatabaseName(dsn string) (string, error) { + // Strip file: URI prefix if present + name := strings.TrimPrefix(dsn, "file:") + // Strip query parameters (?_journal=WAL, ?mode=ro, etc.) + if qIdx := strings.Index(name, "?"); qIdx >= 0 { + name = name[:qIdx] + } + // Special names pass through as-is + if name == ":memory:" || name == "" { + return "sqlite", nil + } + // Use the base filename without extension as the database name + return strings.TrimSuffix(filepath.Base(name), filepath.Ext(name)), nil +} diff --git a/pkg/dbsql/provider_sqlitego_test.go b/pkg/dbsql/provider_sqlitego_test.go new file mode 100644 index 0000000..5ac19b1 --- /dev/null +++ b/pkg/dbsql/provider_sqlitego_test.go @@ -0,0 +1,53 @@ +// Copyright © 2026 Kaleido, Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbsql + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetSQLiteDatabaseName(t *testing.T) { + p := &sqLiteProvider{} + + dbName, err := p.GetDatabaseName("/var/data/myapp.db") + assert.NoError(t, err) + assert.Equal(t, "myapp", dbName) + + dbName, err = p.GetDatabaseName("/var/data/myapp.sqlite?_journal=WAL") + assert.NoError(t, err) + assert.Equal(t, "myapp", dbName) + + dbName, err = p.GetDatabaseName("file:/var/data/myapp.sqlite?mode=ro") + assert.NoError(t, err) + assert.Equal(t, "myapp", dbName) + + dbName, err = p.GetDatabaseName("myapp.db") + assert.NoError(t, err) + assert.Equal(t, "myapp", dbName) + + // :memory: falls back to generic "sqlite" + dbName, err = p.GetDatabaseName(":memory:") + assert.NoError(t, err) + assert.Equal(t, "sqlite", dbName) + + // Empty DSN falls back to generic "sqlite" + dbName, err = p.GetDatabaseName("") + assert.NoError(t, err) + assert.Equal(t, "sqlite", dbName) +} diff --git a/pkg/ffapi/apiserver.go b/pkg/ffapi/apiserver.go index 3c8b226..a6883b5 100644 --- a/pkg/ffapi/apiserver.go +++ b/pkg/ffapi/apiserver.go @@ -1,4 +1,4 @@ -// Copyright © 2025 Kaleido, Inc. +// Copyright © 2026 Kaleido, Inc. 
// // SPDX-License-Identifier: Apache-2.0 // @@ -18,6 +18,7 @@ package ffapi import ( "context" + "encoding/json" "fmt" "io" "net" @@ -122,15 +123,16 @@ func NewAPIServer[T any](ctx context.Context, options APIServerOptions[T]) APISe } as := &apiServer[T]{ - defaultFilterLimit: options.APIConfig.GetUint64(ConfAPIDefaultFilterLimit), - maxFilterLimit: options.APIConfig.GetUint64(ConfAPIMaxFilterLimit), - maxFilterSkip: options.APIConfig.GetUint64(ConfAPIMaxFilterSkip), - requestTimeout: options.APIConfig.GetDuration(ConfAPIRequestTimeout), - requestMaxTimeout: options.APIConfig.GetDuration(ConfAPIRequestMaxTimeout), - monitoringEnabled: options.MonitoringConfig.GetBool(ConfMonitoringServerEnabled), - metricsPath: options.MonitoringConfig.GetString(ConfMonitoringServerMetricsPath), - livenessPath: options.MonitoringConfig.GetString(ConfMonitoringServerLivenessPath), - loggingPath: options.MonitoringConfig.GetString(ConfMonitoringServerLoggingPath), + defaultFilterLimit: options.APIConfig.GetUint64(ConfAPIDefaultFilterLimit), + maxFilterLimit: options.APIConfig.GetUint64(ConfAPIMaxFilterLimit), + maxFilterSkip: options.APIConfig.GetUint64(ConfAPIMaxFilterSkip), + requestTimeout: options.APIConfig.GetDuration(ConfAPIRequestTimeout), + requestMaxTimeout: options.APIConfig.GetDuration(ConfAPIRequestMaxTimeout), + monitoringEnabled: options.MonitoringConfig.GetBool(ConfMonitoringServerEnabled), + metricsPath: options.MonitoringConfig.GetString(ConfMonitoringServerMetricsPath), + livenessPath: options.MonitoringConfig.GetString(ConfMonitoringServerLivenessPath), + loggingPath: options.MonitoringConfig.GetString(ConfMonitoringServerLoggingPath), + alwaysPaginate: options.APIConfig.GetBool(ConfAPIAlwaysPaginate), handleYAML: options.HandleYAML, apiDynamicPublicURLHeader: options.APIConfig.GetString(ConfAPIDynamicPublicURLHeader), @@ -405,12 +407,25 @@ func (as *apiServer[T]) noContentResponder(res http.ResponseWriter, _ *http.Requ res.WriteHeader(http.StatusNoContent) } 
-func (as *apiServer[T]) loggingSettingsHandler(_ http.ResponseWriter, req *http.Request) (status int, err error) { +type loggingChanges struct { + Level *logSettingChange `json:"level,omitempty"` +} + +type logSettingChange struct { + Previous any `json:"previous,omitempty"` + New any `json:"new"` +} + +func LoggingSettingsHandler(res http.ResponseWriter, req *http.Request) (status int, err error) { if req.Method != http.MethodPut { return http.StatusMethodNotAllowed, i18n.NewError(req.Context(), i18n.MsgMethodNotAllowed) } + + changes := &loggingChanges{} + logLevel := req.URL.Query().Get("level") if logLevel != "" { + currentLogLevel := log.GetLevel() l := log.L(log.WithLogFieldsMap(req.Context(), map[string]string{"new_level": logLevel})) switch strings.ToLower(logLevel) { case "error": @@ -418,16 +433,26 @@ func (as *apiServer[T]) loggingSettingsHandler(_ http.ResponseWriter, req *http. case "trace": case "info": case "warn": + case "warning": // noop - all valid levels default: l.Warn("invalid log level") return http.StatusBadRequest, i18n.NewError(req.Context(), i18n.MsgInvalidLogLevel, logLevel) } - l.Warn("changing log level", logLevel) + l.Warn("changing log level") log.SetLevel(logLevel) + changes.Level = &logSettingChange{ + Previous: currentLogLevel, + New: logLevel, + } } - // TODO allow for toggling formatting (json, text), sampling, etc. 
+ res.Header().Add("Content-Type", "application/json") + changesJSON, _ := json.Marshal(changes) + _, err = res.Write(changesJSON) + if err != nil { + return http.StatusInternalServerError, err + } return http.StatusAccepted, nil } @@ -442,7 +467,7 @@ func (as *apiServer[T]) createMonitoringMuxRouter(ctx context.Context) (*mux.Rou panic(err) } r.Path(as.metricsPath).Handler(h) - r.Path(as.loggingPath).Handler(hf.APIWrapper(as.loggingSettingsHandler)) + r.Path(as.loggingPath).Handler(hf.APIWrapper(LoggingSettingsHandler)) r.HandleFunc(as.livenessPath, as.noContentResponder) for _, route := range as.MonitoringRoutes { diff --git a/pkg/ffapi/apiserver_config.go b/pkg/ffapi/apiserver_config.go index 2d374c4..946251f 100644 --- a/pkg/ffapi/apiserver_config.go +++ b/pkg/ffapi/apiserver_config.go @@ -26,14 +26,13 @@ var ( ConfMonitoringServerMetricsPath = "metricsPath" ConfMonitoringServerLivenessPath = "livenessPath" ConfMonitoringServerLoggingPath = "loggingPath" - - ConfAPIDefaultFilterLimit = "defaultFilterLimit" - ConfAPIMaxFilterLimit = "maxFilterLimit" - ConfAPIMaxFilterSkip = "maxFilterSkip" - ConfAPIRequestTimeout = "requestTimeout" - ConfAPIRequestMaxTimeout = "requestMaxTimeout" - ConfAPIAlwaysPaginate = "alwaysPaginate" - ConfAPIDynamicPublicURLHeader = "dynamicPublicURLHeader" + ConfAPIDefaultFilterLimit = "defaultFilterLimit" + ConfAPIMaxFilterLimit = "maxFilterLimit" + ConfAPIMaxFilterSkip = "maxFilterSkip" + ConfAPIRequestTimeout = "requestTimeout" + ConfAPIRequestMaxTimeout = "requestMaxTimeout" + ConfAPIAlwaysPaginate = "alwaysPaginate" + ConfAPIDynamicPublicURLHeader = "dynamicPublicURLHeader" ) func InitAPIServerConfig(apiConfig, monitoringConfig, corsConfig config.Section) { diff --git a/pkg/ffapi/apiserver_test.go b/pkg/ffapi/apiserver_test.go index 6f0fd11..712a46e 100644 --- a/pkg/ffapi/apiserver_test.go +++ b/pkg/ffapi/apiserver_test.go @@ -30,8 +30,10 @@ import ( "github.com/go-resty/resty/v2" "github.com/hyperledger/firefly-common/pkg/config" 
"github.com/hyperledger/firefly-common/pkg/httpserver" + "github.com/hyperledger/firefly-common/pkg/log" "github.com/hyperledger/firefly-common/pkg/metric" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // utManager simulate the type you'd pass through your request interface @@ -783,3 +785,50 @@ func TestVersionedAPIInitErrors(t *testing.T) { assert.Regexp(t, "FF00255", err) } + +func TestMonitorServerChangeLogLevel(t *testing.T) { + _, as, done := newTestAPIServer(t, true) + defer done() + + <-as.started + + logLevels := []string{"error", "warning" /* warn also works but logrus converts to warning so we test that for simplicity */, "debug", "trace"} + prevLevel := "info" + for _, newLevel := range logLevels { + require.Equal(t, prevLevel, log.GetLevel()) + res, err := resty.New().R(). + Put(fmt.Sprintf("%s/logging?level=%s", as.MonitoringPublicURL(), newLevel)) + require.NoError(t, err) + assert.NotEmpty(t, res.Body()) + assert.Equal(t, newLevel, log.GetLevel()) + changes := &loggingChanges{} + err = json.Unmarshal(res.Body(), changes) + require.NoError(t, err) + assert.Equal(t, prevLevel, changes.Level.Previous) + assert.Equal(t, newLevel, changes.Level.New) + prevLevel = newLevel + } + + res, err := resty.New().R(). + Put(fmt.Sprintf("%s/logging?level=unsupported", as.MonitoringPublicURL())) + assert.NoError(t, err) + assert.Equal(t, http.StatusBadRequest, res.StatusCode()) + assert.Contains(t, string(res.Body()), "FF00257") +} + +func TestMonitoringServerMetrics(t *testing.T) { + _, as, done := newTestAPIServer(t, true) + defer done() + + <-as.Started() + + res, err := resty.New().R(). 
+ Get(fmt.Sprintf("%s/metrics", as.MonitoringPublicURL())) + require.NoError(t, err) + assert.NotEmpty(t, res.Body()) + metrics := string(res.Body()) + assert.Contains(t, metrics, "HELP") // help text from standard openmetrics endpoint + assert.Contains(t, metrics, "go_build_info") // build info + assert.Contains(t, metrics, "go_gc_duration_seconds") // go + assert.Contains(t, metrics, "promhttp_metric_handler_requests_total") // http for prom +} diff --git a/pkg/ffresty/ffresty.go b/pkg/ffresty/ffresty.go index 2c7fad4..e311adc 100644 --- a/pkg/ffresty/ffresty.go +++ b/pkg/ffresty/ffresty.go @@ -99,16 +99,19 @@ type HTTPConfig struct { func EnableClientMetrics(ctx context.Context, metricsRegistry metric.MetricsRegistry) error { // create a metrics manager (if not already set) - if metricsManager == nil { - mm, err := metricsRegistry.NewMetricsManagerForSubsystem(ctx, "resty") - metricsManager = mm - if err != nil { - return err - } - metricsManager.NewCounterMetricWithLabels(ctx, metricsHTTPResponsesTotal, "HTTP response", []string{"status", "error", "host", "method"}, false) - metricsManager.NewCounterMetricWithLabels(ctx, metricsNetworkErrorsTotal, "Network error", []string{"host", "method"}, false) - metricsManager.NewSummaryMetricWithLabels(ctx, metricsHTTPResponseTime, "HTTP response time", []string{"status", "host", "method"}, false) + if metricsManager != nil { + log.L(ctx).Warn("resty metrics are already enabled, skipping") + return nil + } + + mm, err := metricsRegistry.NewMetricsManagerForSubsystem(ctx, "resty") + if err != nil { + return err } + metricsManager = mm + metricsManager.NewCounterMetricWithLabels(ctx, metricsHTTPResponsesTotal, "HTTP response", []string{"status", "error", "host", "method"}, false) + metricsManager.NewCounterMetricWithLabels(ctx, metricsNetworkErrorsTotal, "Network error", []string{"host", "method"}, false) + metricsManager.NewSummaryMetricWithLabels(ctx, metricsHTTPResponseTime, "HTTP response time", []string{"status", 
"host", "method"}, false) // create hooks onErrorMetricsHook := func(req *resty.Request, _ error) { diff --git a/pkg/i18n/en_base_error_messages.go b/pkg/i18n/en_base_error_messages.go index 2cf646f..55dabfe 100644 --- a/pkg/i18n/en_base_error_messages.go +++ b/pkg/i18n/en_base_error_messages.go @@ -194,4 +194,5 @@ var ( MsgInvalidLogLevel = ffe("FF00257", "Invalid log level: '%s'", http.StatusBadRequest) MsgFFExtensionsInvalid = ffe("FF00258", "Invalid extension '%s' - extensions should be RFC 3986 compliant query parameter format (e.g. x-name=value with percent-encoding for special characters)", http.StatusBadRequest) MsgFFExtensionsInvalidEncoding = ffe("FF00259", "Invalid extension key '%s' - extension keys must follow the format 'x-'", http.StatusBadRequest) + MsgDBFailedToExtractDBName = ffe("FF00260", "Failed to extract database name from PostgreSQL URL") ) diff --git a/pkg/log/log.go b/pkg/log/log.go index b855f3c..1d95e1a 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -89,10 +89,15 @@ func loggerFromContext(ctx context.Context) *logrus.Entry { return logger.(*logrus.Entry) } +// SetLevel allows for dynamically changing the log level func SetLevel(level string) { switch strings.ToLower(level) { case "error": logrus.SetLevel(logrus.ErrorLevel) + case "warn": + logrus.SetLevel(logrus.WarnLevel) + case "warning": + logrus.SetLevel(logrus.WarnLevel) case "debug": logrus.SetLevel(logrus.DebugLevel) case "trace": @@ -102,6 +107,11 @@ func SetLevel(level string) { } } +// GetLevel returns the currently configured log level +func GetLevel() string { + return logrus.GetLevel().String() +} + type Formatting struct { DisableColor bool ForceColor bool diff --git a/pkg/metric/metric.go b/pkg/metric/metric.go index 8de7ccb..1c240ec 100644 --- a/pkg/metric/metric.go +++ b/pkg/metric/metric.go @@ -113,6 +113,7 @@ func NewPrometheusMetricsRegistryWithOptions(componentName string /*component na // register default cpu & go metrics by default 
registerer.MustRegister(collectors.NewGoCollector()) registerer.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + registerer.MustRegister(collectors.NewBuildInfoCollector()) metricsPrefix := ffMetricsPrefix if opts.MetricsPrefix != "" {