Skip to content

Commit b5d3c14

Browse files
Sql analyzer optimize (#909)
1 parent 7433a45 commit b5d3c14

14 files changed

Lines changed: 203 additions & 22 deletions

File tree

.golangci.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,9 @@ linters:
4747
- text: "Error return value of os.*Setenv is not checked"
4848
linters:
4949
- errcheck
50+
- source: "^\\s*defer\\s+"
51+
linters:
52+
- errcheck
5053

5154
settings:
5255
errcheck:
@@ -55,6 +58,7 @@ linters:
5558
- (*net/http.Response).Body.Close
5659
- (*database/sql.DB).Close
5760
- (*database/sql.Conn).Close
61+
- (*database/sql.Rows).Close
5862
- (*github.com/spf13/cobra.Command).MarkFlagRequired
5963
- (*github.com/oceanbase/ob-operator/internal/server.HTTPServer).Stop
6064
- io.ReadCloser.Close

build/Dockerfile.sql-analyzer

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,13 @@ ARG GOPROXY
55
ARG GOSUMDB
66
ARG ASAN_ENABLED
77

8+
# Install clang if ASAN_ENABLED is set
9+
RUN if [ -n "$ASAN_ENABLED" ]; then \
10+
apt-get update && \
11+
apt-get install -y libasan8 clang && \
12+
rm -rf /var/lib/apt/lists/*; \
13+
fi
14+
815
WORKDIR /workspace
916
# copy everything
1017
COPY . .
@@ -20,7 +27,7 @@ RUN ASAN_ENABLED=$ASAN_ENABLED make sql-analyzer
2027
FROM ubuntu:24.04
2128

2229
ARG ASAN_ENABLED
23-
# Install libasan if ASAN_ENABLED is set
30+
# Install libasan runtime if ASAN_ENABLED is set
2431
RUN if [ -n "$ASAN_ENABLED" ]; then \
2532
apt-get update && \
2633
apt-get install -y libasan8 && \

cmd/sql-analyzer/main.go

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,17 @@ func main() {
211211
}
212212
}
213213

214+
// Configure DuckDB threads
215+
duckDBThreads := 4
216+
duckDBThreadsStr := os.Getenv("DUCKDB_THREADS")
217+
if duckDBThreadsStr != "" {
218+
if val, err := strconv.Atoi(duckDBThreadsStr); err == nil && val > 0 {
219+
duckDBThreads = val
220+
} else {
221+
analyzerLogger.Warnf("Invalid DUCKDB_THREADS value '%s', using default of 4.", duckDBThreadsStr)
222+
}
223+
}
224+
214225
config := &config.Config{
215226
Namespace: namespace,
216227
OBTenant: obtenant,
@@ -223,6 +234,7 @@ func main() {
223234
WorkerNum: workerNum,
224235
PlanCacheSize: planCacheSize,
225236
DuckDBMaxOpenConns: duckDBMaxOpenConns,
237+
DuckDBThreads: duckDBThreads,
226238
}
227239

228240
// Initialize Stores

internal/sql-analyzer/collector/collector.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,8 @@ func (c *Collector) Init() error {
7070
return errors.Wrap(err, "Failed to init sql plan table")
7171
}
7272

73-
c.SqlAuditStore.StartCleanupWorker()
73+
c.SqlAuditStore.StartBackgroundWorkers()
74+
c.SqlPlanStore.StartBackgroundWorkers()
7475

7576
obtenant, err := clients.GetOBTenant(c.Ctx, types.NamespacedName{
7677
Namespace: c.Config.Namespace,

internal/sql-analyzer/collector/sql_audit.go

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ package collector
1515
import (
1616
"fmt"
1717
"sync"
18+
"time"
1819

1920
"github.com/pkg/errors"
2021

@@ -59,8 +60,15 @@ func (c *Collector) collectSqlAuditData() {
5960

6061
for svrIP, maxRequestID := range maxRequestIDs {
6162
lastRequestID, ok := c.RequestIdMap[svrIP]
62-
if ok && lastRequestID == maxRequestID {
63-
continue
63+
if ok {
64+
if lastRequestID == maxRequestID {
65+
continue
66+
}
67+
if lastRequestID > maxRequestID {
68+
c.Logger.Infof("Detected request_id reset for observer %s (last: %d, current max: %d). Resetting cursor.", svrIP, lastRequestID, maxRequestID)
69+
lastRequestID = 0
70+
c.RequestIdMap[svrIP] = 0
71+
}
6472
}
6573

6674
wg.Add(1)
@@ -150,7 +158,7 @@ func (c *Collector) collectSqlAuditByOBServer(svrIP string, lastRequestID uint64
150158
return nil, errors.Wrap(err, "Failed to get oceanbase connection")
151159
}
152160

153-
if err := cnx.QueryList(c.Ctx, &results, sqlconst.GetSqlStatistics, c.TenantID, svrIP, oceanbaseconst.RpcPort, lastRequestID); err != nil {
161+
if err := cnx.QueryListWithTimeout(c.Ctx, sqlconst.QueryOceanBaseTimeoutSeconds*time.Second, &results, sqlconst.GetSqlStatistics, c.TenantID, svrIP, oceanbaseconst.RpcPort, lastRequestID); err != nil {
154162
return nil, err
155163
}
156164
return results, nil

internal/sql-analyzer/config/collector.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,4 +27,5 @@ type Config struct {
2727
SlowSqlThresholdMilliSeconds int
2828
PlanCacheSize int
2929
DuckDBMaxOpenConns int
30+
DuckDBThreads int
3031
}
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
/*
2+
Copyright (c) 2025 OceanBase
3+
ob-operator is licensed under Mulan PSL v2.
4+
You can use this software according to the terms and conditions of the Mulan PSL v2.
5+
You may obtain a copy of Mulan PSL v2 at:
6+
http://license.coscl.org.cn/MulanPSL2
7+
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
8+
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
9+
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
10+
See the Mulan PSL v2 for more details.
11+
*/
12+
13+
package sql
14+
15+
const (
	// QueryOceanBaseTimeoutSeconds is the per-query timeout, in seconds,
	// for reads against OceanBase internal tables. Callers multiply it by
	// time.Second when passing it to QueryListWithTimeout.
	QueryOceanBaseTimeoutSeconds = 30
)

internal/sql-analyzer/oceanbase/index.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ package oceanbase
1515
import (
1616
"context"
1717
"strings"
18+
"time"
1819

1920
"github.com/oceanbase/ob-operator/internal/sql-analyzer/api/model"
2021
sqlconst "github.com/oceanbase/ob-operator/internal/sql-analyzer/const/sql"
@@ -36,7 +37,7 @@ type PrimaryKeyRow struct {
3637
func QueryTableIndexes(ctx context.Context, opMgr *operation.OceanbaseOperationManager, tenantID uint64, dbName, tableName string, tableID int64) ([]model.IndexInfo, error) {
3738
var indexes []model.IndexInfo
3839
var pkRows []PrimaryKeyRow
39-
err := opMgr.QueryList(ctx, &pkRows, sqlconst.GetTablePrimaryKey, tenantID, tableID)
40+
err := opMgr.QueryListWithTimeout(ctx, sqlconst.QueryOceanBaseTimeoutSeconds*time.Second, &pkRows, sqlconst.GetTablePrimaryKey, tenantID, tableID)
4041
if err != nil {
4142
return nil, err
4243
}
@@ -58,7 +59,7 @@ func QueryTableIndexes(ctx context.Context, opMgr *operation.OceanbaseOperationM
5859
}
5960

6061
var rows []IndexRow
61-
err = opMgr.QueryList(ctx, &rows, sqlconst.GetTableIndex, tenantID, dbName, tableName)
62+
err = opMgr.QueryListWithTimeout(ctx, sqlconst.QueryOceanBaseTimeoutSeconds*time.Second, &rows, sqlconst.GetTableIndex, tenantID, dbName, tableName)
6263
if err != nil {
6364
return nil, err
6465
}

internal/sql-analyzer/store/global.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,12 +29,12 @@ var (
2929

3030
func InitGlobalStores(ctx context.Context, conf *config.Config, logger *logrus.Logger) error {
3131
var err error
32-
globalSqlAuditStore, err = NewSqlAuditStore(ctx, filepath.Join(conf.DataPath, "sql_audit"), conf.DuckDBMaxOpenConns, logger)
32+
globalSqlAuditStore, err = NewSqlAuditStore(ctx, filepath.Join(conf.DataPath, "sql_audit"), conf.DuckDBMaxOpenConns, conf.DuckDBThreads, logger)
3333
if err != nil {
3434
return err
3535
}
3636

37-
globalPlanStore, err = NewPlanStore(ctx, filepath.Join(conf.DataPath, "sql_plan"), conf.DuckDBMaxOpenConns, logger)
37+
globalPlanStore, err = NewPlanStore(ctx, filepath.Join(conf.DataPath, "sql_plan"), conf.DuckDBMaxOpenConns, conf.DuckDBThreads, logger)
3838
if err != nil {
3939
return err
4040
}
Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
/*
2+
Copyright (c) 2025 OceanBase
3+
ob-operator is licensed under Mulan PSL v2.
4+
You can use this software according to the terms and conditions of the Mulan PSL v2.
5+
You may obtain a copy of Mulan PSL v2 at:
6+
http://license.coscl.org.cn/MulanPSL2
7+
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
8+
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
9+
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
10+
See the Mulan PSL v2 for more details.
11+
*/
12+
13+
package store
14+
15+
import (
16+
"context"
17+
"database/sql"
18+
"runtime"
19+
"time"
20+
21+
logger "github.com/sirupsen/logrus"
22+
)
23+
24+
func StartMemoryMonitoring(ctx context.Context, name string, db *sql.DB, l *logger.Logger) {
25+
go func() {
26+
ticker := time.NewTicker(1 * time.Minute)
27+
defer ticker.Stop()
28+
for {
29+
select {
30+
case <-ticker.C:
31+
// Log Go Memory Stats
32+
var m runtime.MemStats
33+
runtime.ReadMemStats(&m)
34+
// bToMb helper closure
35+
bToMb := func(b uint64) uint64 {
36+
return b / 1024 / 1024
37+
}
38+
l.Infof("[%s] Go Memory: Alloc=%v MiB, Sys=%v MiB, HeapSys=%v MiB, StackSys=%v MiB, NumGC=%v",
39+
name, bToMb(m.Alloc), bToMb(m.Sys), bToMb(m.HeapSys), bToMb(m.StackSys), m.NumGC)
40+
41+
rows, err := db.QueryContext(ctx, "SELECT * FROM duckdb_memory()")
42+
if err != nil {
43+
l.Warnf("[%s] Failed to query duckdb_memory: %v", name, err)
44+
continue
45+
}
46+
defer rows.Close()
47+
48+
cols, _ := rows.Columns()
49+
for rows.Next() {
50+
columns := make([]any, len(cols))
51+
columnPointers := make([]any, len(cols))
52+
for i := range columns {
53+
columnPointers[i] = &columns[i]
54+
}
55+
56+
if err := rows.Scan(columnPointers...); err != nil {
57+
l.Warnf("[%s] Failed to scan duckdb_memory row: %v", name, err)
58+
break
59+
}
60+
61+
rowMap := make(map[string]any)
62+
for i, colName := range cols {
63+
val, ok := columnPointers[i].(*any)
64+
if !ok {
65+
l.Warnf("[%s] Failed to type assert column pointer for %s", name, colName)
66+
continue
67+
}
68+
rowMap[colName] = *val
69+
}
70+
l.Infof("[%s] DuckDB Memory: %v", name, rowMap)
71+
}
72+
if err := rows.Err(); err != nil {
73+
l.Warnf("[%s] Error during duckdb_memory iteration: %v", name, err)
74+
}
75+
case <-ctx.Done():
76+
return
77+
}
78+
}
79+
}()
80+
}

0 commit comments

Comments
 (0)