Skip to content

Commit 76ef0a3

Browse files
committed
refactor: fix race conditions, nil panics, O(n) hot paths and code duplication across codebase
1 parent 892d96a commit 76ef0a3

19 files changed

Lines changed: 237 additions & 324 deletions

File tree

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# sql-parser-go
1+
# SQLens
22

33
[![Go Version](https://img.shields.io/badge/Go-1.21+-00ADD8?style=flat&logo=go)](https://golang.org)
44
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)

cmd/sqlparser/main.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@ import (
66
"flag"
77
"fmt"
88
"os"
9+
"os/signal"
910
"strings"
11+
"syscall"
1012
"time"
1113

1214
"github.com/Chahine-tech/sql-parser-go/internal/config"
@@ -287,6 +289,7 @@ func watchLogFile(filename string, cfg *config.Config, verbose bool, tailLines i
287289

288290
// Handle Ctrl+C
289291
sigChan := make(chan os.Signal, 1)
292+
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
290293

291294
fmt.Println("📊 Real-time monitoring started. Press Ctrl+C to stop.")
292295
fmt.Println(strings.Repeat("=", 80))

internal/performance/monitor.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,5 @@ func (pm *PerformanceMonitor) GetMetrics() map[string]interface{} {
7575
// ForceGC forces garbage collection and returns memory stats
7676
func ForceGC() MemoryStats {
7777
runtime.GC()
78-
runtime.GC() // Double GC to ensure cleanup
7978
return GetMemoryStats()
8079
}

pkg/analyzer/analyzer.go

Lines changed: 37 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ const (
1414
defaultJoinCapacity = 4
1515
defaultConditionCapacity = 8
1616
defaultCacheCapacity = 64
17+
maxCacheSize = 512
1718
)
1819

1920
type Analyzer struct {
@@ -99,6 +100,13 @@ func (a *Analyzer) AnalyzeWithCache(stmt parser.Statement, cacheKey string) Quer
99100

100101
if cacheKey != "" {
101102
a.mu.Lock()
103+
if len(a.cache) >= maxCacheSize {
104+
// Evict one arbitrary entry to cap memory usage.
105+
for k := range a.cache {
106+
delete(a.cache, k)
107+
break
108+
}
109+
}
102110
a.cache[cacheKey] = analysis
103111
a.mu.Unlock()
104112
}
@@ -126,10 +134,14 @@ func (a *Analyzer) analyzeSelectStatement(stmt *parser.SelectStatement) {
126134
Usage: "SELECT",
127135
})
128136

137+
condition := ""
138+
if join.Condition != nil {
139+
condition = join.Condition.String()
140+
}
129141
a.analysis.Joins = append(a.analysis.Joins, JoinInfo{
130142
Type: join.JoinType,
131143
RightTable: join.Table.Name,
132-
Condition: join.Condition.String(),
144+
Condition: condition,
133145
})
134146

135147
a.analyzeExpression(join.Condition, "JOIN")
@@ -312,29 +324,33 @@ func (a *Analyzer) SuggestOptimizations(stmt *parser.SelectStatement) []Optimiza
312324
return suggestions
313325
}
314326

315-
// GetEnhancedOptimizations returns comprehensive optimization suggestions using the new engine
327+
// GetEnhancedOptimizations returns comprehensive optimization suggestions using the new engine.
316328
func (a *Analyzer) GetEnhancedOptimizations(stmt parser.Statement) []EnhancedOptimizationSuggestion {
317-
if a.optimizationEngine == nil {
318-
// Fallback to basic suggestions if no optimization engine is available
319-
basicSuggestions := a.SuggestOptimizations(stmt.(*parser.SelectStatement))
320-
enhanced := make([]EnhancedOptimizationSuggestion, len(basicSuggestions))
321-
for i, basic := range basicSuggestions {
322-
enhanced[i] = EnhancedOptimizationSuggestion{
323-
Type: basic.Type,
324-
Description: basic.Description,
325-
Severity: basic.Severity,
326-
Category: "GENERAL",
327-
Rule: basic.Type,
328-
Line: basic.Line,
329-
Suggestion: "Review query for optimization opportunities",
330-
Impact: "MEDIUM",
331-
AutoFixable: false,
332-
}
333-
}
334-
return enhanced
329+
if a.optimizationEngine != nil {
330+
return a.optimizationEngine.AnalyzeOptimizations(stmt)
335331
}
336332

337-
return a.optimizationEngine.AnalyzeOptimizations(stmt)
333+
// Fallback: only SELECT statements support basic suggestions.
334+
sel, ok := stmt.(*parser.SelectStatement)
335+
if !ok {
336+
return nil
337+
}
338+
basicSuggestions := a.SuggestOptimizations(sel)
339+
enhanced := make([]EnhancedOptimizationSuggestion, len(basicSuggestions))
340+
for i, basic := range basicSuggestions {
341+
enhanced[i] = EnhancedOptimizationSuggestion{
342+
Type: basic.Type,
343+
Description: basic.Description,
344+
Severity: basic.Severity,
345+
Category: "GENERAL",
346+
Rule: basic.Type,
347+
Line: basic.Line,
348+
Suggestion: "Review query for optimization opportunities",
349+
Impact: "MEDIUM",
350+
AutoFixable: false,
351+
}
352+
}
353+
return enhanced
338354
}
339355

340356
// SetOptimizationEngine allows setting a custom optimization engine

pkg/analyzer/concurrent.go

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -41,19 +41,24 @@ type AnalysisResult struct {
4141
Error error
4242
}
4343

44-
// AnalyzeConcurrently analyzes multiple queries concurrently
44+
// AnalyzeConcurrently analyzes multiple queries concurrently.
4545
func (ca *ConcurrentAnalyzer) AnalyzeConcurrently(ctx context.Context, jobs []AnalysisJob) []AnalysisResult {
4646
jobChan := make(chan AnalysisJob, len(jobs))
4747
resultChan := make(chan AnalysisResult, len(jobs))
4848

49-
// Start workers
5049
var wg sync.WaitGroup
5150
for i := 0; i < ca.workerCount; i++ {
5251
wg.Add(1)
5352
go ca.worker(ctx, &wg, jobChan, resultChan)
5453
}
5554

56-
// Send jobs
55+
// Close resultChan only after all workers are done.
56+
go func() {
57+
wg.Wait()
58+
close(resultChan)
59+
}()
60+
61+
// Send jobs; close jobChan when done so workers can exit.
5762
go func() {
5863
defer close(jobChan)
5964
for _, job := range jobs {
@@ -65,24 +70,20 @@ func (ca *ConcurrentAnalyzer) AnalyzeConcurrently(ctx context.Context, jobs []An
6570
}
6671
}()
6772

68-
// Collect results
73+
// Collect results until the channel is closed or context is cancelled.
6974
results := make([]AnalysisResult, 0, len(jobs))
70-
for i := 0; i < len(jobs); i++ {
75+
for {
7176
select {
72-
case result := <-resultChan:
77+
case result, ok := <-resultChan:
78+
if !ok {
79+
return results
80+
}
7381
results = append(results, result)
7482
case <-ctx.Done():
75-
results = append(results, AnalysisResult{
76-
Error: ctx.Err(),
77-
})
83+
results = append(results, AnalysisResult{Error: ctx.Err()})
84+
return results
7885
}
7986
}
80-
81-
// Wait for workers to finish
82-
wg.Wait()
83-
close(resultChan)
84-
85-
return results
8687
}
8788

8889
// worker processes analysis jobs

pkg/dialect/oracle.go

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -57,15 +57,16 @@ func (d *OracleDialect) GetDataTypes() []string {
5757
return append(CommonDataTypes, oracle...)
5858
}
5959

60-
func (d *OracleDialect) IsReservedWord(word string) bool {
61-
keywords := d.GetKeywords()
62-
upper := strings.ToUpper(word)
63-
for _, keyword := range keywords {
64-
if keyword == upper {
65-
return true
66-
}
60+
var oracleReserved = func() map[string]bool {
61+
m := make(map[string]bool)
62+
for _, k := range (&OracleDialect{}).GetKeywords() {
63+
m[k] = true
6764
}
68-
return false
65+
return m
66+
}()
67+
68+
func (d *OracleDialect) IsReservedWord(word string) bool {
69+
return oracleReserved[strings.ToUpper(word)]
6970
}
7071

7172
func (d *OracleDialect) GetLimitSyntax() LimitSyntax {

pkg/dialect/sqlite.go

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -50,15 +50,16 @@ func (d *SQLiteDialect) GetDataTypes() []string {
5050
return append(CommonDataTypes, sqlite...)
5151
}
5252

53-
func (d *SQLiteDialect) IsReservedWord(word string) bool {
54-
keywords := d.GetKeywords()
55-
upper := strings.ToUpper(word)
56-
for _, keyword := range keywords {
57-
if keyword == upper {
58-
return true
59-
}
53+
var sqliteReserved = func() map[string]bool {
54+
m := make(map[string]bool)
55+
for _, k := range (&SQLiteDialect{}).GetKeywords() {
56+
m[k] = true
6057
}
61-
return false
58+
return m
59+
}()
60+
61+
func (d *SQLiteDialect) IsReservedWord(word string) bool {
62+
return sqliteReserved[strings.ToUpper(word)]
6263
}
6364

6465
func (d *SQLiteDialect) GetLimitSyntax() LimitSyntax {

pkg/dialect/sqlserver.go

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,16 @@ func (d *SQLServerDialect) GetDataTypes() []string {
5252
return append(CommonDataTypes, sqlserver...)
5353
}
5454

55-
func (d *SQLServerDialect) IsReservedWord(word string) bool {
56-
keywords := d.GetKeywords()
57-
upper := strings.ToUpper(word)
58-
for _, keyword := range keywords {
59-
if keyword == upper {
60-
return true
61-
}
55+
var sqlserverReserved = func() map[string]bool {
56+
m := make(map[string]bool)
57+
for _, k := range (&SQLServerDialect{}).GetKeywords() {
58+
m[k] = true
6259
}
63-
return false
60+
return m
61+
}()
62+
63+
func (d *SQLServerDialect) IsReservedWord(word string) bool {
64+
return sqlserverReserved[strings.ToUpper(word)]
6465
}
6566

6667
func (d *SQLServerDialect) GetLimitSyntax() LimitSyntax {

pkg/lexer/lexer.go

Lines changed: 4 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -48,20 +48,11 @@ func (l *Lexer) readChar() {
4848
}
4949
}
5050

51-
// lookupIdent checks if an identifier is a keyword using the dialect
51+
// lookupIdent returns the token type for an identifier, checking common SQL keywords first.
5252
func (l *Lexer) lookupIdent(ident string) TokenType {
53-
// First check common SQL keywords
5453
if tok, ok := keywords[ident]; ok {
5554
return tok
5655
}
57-
58-
// If not found in common keywords, check if it's a dialect-specific reserved word
59-
if l.dialect.IsReservedWord(ident) {
60-
// For dialect-specific keywords, we'll treat them as identifiers for now
61-
// but could extend this to have dialect-specific token types
62-
return IDENT
63-
}
64-
6556
return IDENT
6657
}
6758

@@ -150,10 +141,10 @@ func (l *Lexer) NextToken() Token {
150141
// Double quotes can be string literals or identifiers depending on dialect
151142
if l.dialect.Name() == "PostgreSQL" || l.dialect.Name() == "SQLite" || l.dialect.Name() == "Oracle" {
152143
tok.Type = IDENT
153-
tok.Literal = l.readDoubleQuotedIdentifier()
144+
tok.Literal = l.readDoubleQuoted()
154145
} else {
155146
tok.Type = STRING
156-
tok.Literal = l.readDoubleQuotedString()
147+
tok.Literal = l.readDoubleQuoted()
157148
}
158149
tok.Position = l.position
159150
tok.Line = l.line
@@ -289,18 +280,7 @@ func (l *Lexer) readString() string {
289280
return result
290281
}
291282

292-
func (l *Lexer) readDoubleQuotedString() string {
293-
position := l.position + 1
294-
for {
295-
l.readChar()
296-
if l.ch == '"' || l.ch == 0 {
297-
break
298-
}
299-
}
300-
return l.input[position:l.position]
301-
}
302-
303-
func (l *Lexer) readDoubleQuotedIdentifier() string {
283+
func (l *Lexer) readDoubleQuoted() string {
304284
position := l.position + 1
305285
for {
306286
l.readChar()

pkg/logger/formats.go

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package logger
22

33
import "strings"
44

5+
56
// Supported log formats for SQL Server
67
type LogFormat int
78

@@ -69,12 +70,8 @@ func (d *LogFormatDetector) DetectFormat(sample string) LogFormat {
6970

7071
func containsAny(text string, patterns []string) bool {
7172
for _, pattern := range patterns {
72-
if len(text) >= len(pattern) {
73-
for i := 0; i <= len(text)-len(pattern); i++ {
74-
if text[i:i+len(pattern)] == pattern {
75-
return true
76-
}
77-
}
73+
if strings.Contains(text, pattern) {
74+
return true
7875
}
7976
}
8077
return false

0 commit comments

Comments (0)