Skip to content

Commit 736a92c

Browse files
committed
more micro-tuning of small, ghost, death row
1 parent 086ee34 commit 736a92c

File tree

4 files changed

+27
-23
lines changed

4 files changed

+27
-23
lines changed

memory_test.go

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -856,7 +856,6 @@ func TestCache_GetSet_RaceCondition(t *testing.T) {
856856
}
857857
return idx * 10, nil
858858
})
859-
860859
if err != nil {
861860
t.Errorf("GetSet error: %v", err)
862861
}
@@ -926,7 +925,6 @@ func TestCache_GetSet_WithDefaultTTL(t *testing.T) {
926925
val, err := cache.GetSet("key1", func() (int, error) {
927926
return 42, nil
928927
})
929-
930928
if err != nil {
931929
t.Fatalf("GetSet error: %v", err)
932930
}

persistent_test.go

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1742,17 +1742,13 @@ func TestTieredCache_GetSet_SecondCheckMemory(t *testing.T) {
17421742
// Pre-populate memory directly (but not persistence)
17431743
cache.memory.set("key1", 77, 0)
17441744

1745-
loaderCalls := 0
1745+
var loaderCalls atomic.Int32
17461746
loader := func(ctx context.Context) (int, error) {
1747-
loaderCalls++
1747+
loaderCalls.Add(1)
17481748
return 42, nil
17491749
}
17501750

1751-
// First check won't find it (memory is checked at start)
1752-
// But the double-check inside singleflight should find it
1753-
// Actually, the first memory check will find it, so loader won't be called
1754-
1755-
// Let's test a different scenario - concurrent access
1751+
// Test concurrent access
17561752
var wg sync.WaitGroup
17571753
results := make([]int, 10)
17581754

@@ -1819,7 +1815,6 @@ func TestTieredCache_GetSet_SecondStoreGetFound(t *testing.T) {
18191815
loaderCalled = true
18201816
return 42, nil
18211817
})
1822-
18231818
if err != nil {
18241819
t.Fatalf("GetSet failed: %v", err)
18251820
}

s3fifo.go

Lines changed: 20 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -49,22 +49,29 @@ func hashString(s string) uint64 {
4949
}
5050

5151
const (
52-
// maxFreq caps the frequency counter for eviction. Paper uses 3; 2 tuned via binary search.
53-
maxFreq = 2
52+
// maxFreq caps the frequency counter for eviction. Paper uses 3; 5 tuned via binary search.
53+
// WARNING: Must be >= 2. Setting to 1 creates infinite loop in eviction (items with
54+
// freq=1 get promoted instead of evicted, causing evictFromSmall to never return true).
55+
maxFreq = 5
5456

5557
// maxPeakFreq caps peakFreq for death row admission decisions.
5658
maxPeakFreq = 21
5759

58-
// deathRowPct is the percentage of globalMaxPeak required for death row admission.
59-
deathRowPct = 1
60-
6160
// smallQueueRatio is the small queue size as per-mille of shard capacity.
62-
// 90% tuned via binary search for highest avg hitrate while meeting all goals.
63-
smallQueueRatio = 900 // per-mille (divide by 1000)
61+
// 13.8% tuned via binary search for highest avg hitrate (60.68% vs 59.40% at 90%).
62+
smallQueueRatio = 138 // per-mille - TESTING
6463

6564
// ghostFPRate is the bloom filter false positive rate for ghost tracking.
6665
ghostFPRate = 0.00001
6766

67+
// ghostCapPerMille is ghost queue capacity as per-mille of cache size.
68+
// 0.75x tuned via binary search (61.30% vs 60.68% at 8x).
69+
ghostCapPerMille = 750 // per-mille
70+
71+
// deathRowThresholdPerMille scales the death row admission threshold.
72+
// 1000 = average peakFreq. Tuned: 1000-2000 optimal (61.356%).
73+
deathRowThresholdPerMille = 1000
74+
6875
// minDeathRowSize is the minimum death row slots.
6976
// Death row size scales with capacity to match pre-sharding behavior.
7077
minDeathRowSize = 8
@@ -225,7 +232,7 @@ func newS3FIFO[K comparable, V any](cfg *config) *s3fifo[K, V] {
225232
entries: xsync.NewMap[K, *entry[K, V]](xsync.WithPresize(size)),
226233
capacity: size,
227234
smallThresh: size * smallQueueRatio / 1000,
228-
ghostCap: size * 8, // 8x ghost capacity tuned via binary search
235+
ghostCap: size * ghostCapPerMille / 1000,
229236
ghostActive: newBloomFilter(size, ghostFPRate),
230237
ghostAging: newBloomFilter(size, ghostFPRate),
231238
deathRow: make([]*entry[K, V], deathRowSize),
@@ -614,8 +621,11 @@ func (c *s3fifo[K, V]) sampleAvgPeakFreq() uint32 {
614621
// If death row is full, the oldest pending entry is truly evicted.
615622
func (c *s3fifo[K, V]) sendToDeathRow(e *entry[K, V]) {
616623
// Compute adaptive threshold by sampling current entries.
617-
// Only admit entries with above-average frequency to death row.
618-
threshold := c.sampleAvgPeakFreq()
624+
// Only admit entries with above-threshold frequency to death row.
625+
threshold := c.sampleAvgPeakFreq() * deathRowThresholdPerMille / 1000
626+
if threshold == 0 {
627+
threshold = 1
628+
}
619629
if e.peakFreq.Load() < threshold {
620630
c.entries.Delete(e.key)
621631
c.addToGhost(e.hash, e.peakFreq.Load())

s3fifo_test.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -788,9 +788,10 @@ func TestS3FIFO_GhostQueueSize(t *testing.T) {
788788
capacity := 1000
789789
cache := newS3FIFO[int, int](&config{size: capacity})
790790

791-
// Ghost capacity should be 8x cache capacity (tuned via binary search)
792-
if cache.ghostCap != capacity*8 {
793-
t.Errorf("ghost capacity = %d; want %d", cache.ghostCap, capacity*8)
791+
// Ghost capacity should be 0.75x cache capacity (tuned via binary search)
792+
want := capacity * ghostCapPerMille / 1000
793+
if cache.ghostCap != want {
794+
t.Errorf("ghost capacity = %d; want %d", cache.ghostCap, want)
794795
}
795796
}
796797

0 commit comments

Comments
 (0)