Skip to content
Open
7 changes: 7 additions & 0 deletions .changeset/proxy-iterate-cache.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
"@geajs/core": patch
---

### @geajs/core (patch)

- **proxyIterate O(1) proxy cache**: Reactive array iteration methods (`.map()`, `.filter()`, `.forEach()`, `.find()`, `.reduce()`) now reuse cached Proxy instances for object elements via a per-store `iterateProxyCache` keyed on `(array, index)`. The cache is validated by object identity and invalidated on any mutation (splice, push, set, delete, length). Reduces GC pressure in list-heavy applications.
103 changes: 103 additions & 0 deletions packages/gea/benchmarks/getter-memo.bench.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
/**
* Benchmark: store getter memoization with reactive dependency tracking
* PR #41: Cache prototype getter results; invalidate on observed field changes
*
* Before: every getter call recomputes from scratch (even if deps unchanged)
* After: repeated reads hit WeakMap cache; invalidated only when deps change
*
* Run: npx tsx --conditions source packages/gea/benchmarks/getter-memo.bench.ts
*/
import { Store } from '../src/lib/store.ts'

function forceGC() { if (typeof global.gc === 'function') { global.gc(); global.gc() } }
function heapMB() { return process.memoryUsage().heapUsed / 1024 / 1024 }
function median(arr: number[]) {
const s = [...arr].sort((a, b) => a - b)
const m = Math.floor(s.length / 2)
return s.length % 2 ? s[m] : (s[m - 1] + s[m]) / 2
}
function stddev(arr: number[]) {
const avg = arr.reduce((a, b) => a + b, 0) / arr.length
return Math.sqrt(arr.reduce((a, b) => a + (b - avg) ** 2, 0) / arr.length)
}

// ---------- Store with chained computed getters ----------
class OrderStore extends Store {
price = 100
tax = 0.18
discount = 0.05
quantity = 10
label = 'Widget Pro'

get subtotal() { return this.price * this.quantity }
get taxAmount() { return this.subtotal * this.tax }
get discountAmt() { return this.subtotal * this.discount }
get total() { return this.subtotal + this.taxAmount - this.discountAmt }
get displayTotal() { return `${this.label}: $${this.total.toFixed(2)}` }
}

const WARMUP = 50
const TRIALS = 200
const ITERS_PER_TRIAL = 100 // reads per trial measurement

function runTrials(fn: () => void): number[] {
for (let i = 0; i < WARMUP; i++) fn()
return Array.from({ length: TRIALS }, () => {
const t0 = performance.now()
for (let i = 0; i < ITERS_PER_TRIAL; i++) fn()
return (performance.now() - t0) / ITERS_PER_TRIAL // per-read time
})
}

console.log('\n╔══ getter memoization: cold vs warm vs invalidated ══════════════════════════╗')
console.log(`║ OrderStore: 5 chained getters (subtotal→taxAmount→discountAmt→total→display) ║`)
console.log(`║ ${TRIALS} trials × ${ITERS_PER_TRIAL} reads each ║`)
console.log('╚═════════════════════════════════════════════════════════════════════════════╝\n')

// ── Scenario A: Cold reads (fresh store each trial — cache always misses) ──
forceGC()
const hA0 = heapMB()
const coldTimes = runTrials(() => {
const s = new OrderStore() // new instance = cold cache
void s.displayTotal // traverses all 5 getters
})
forceGC()
const hA1 = heapMB()

// ── Scenario B: Warm reads (same store, deps unchanged — all cache hits) ──
const warmStore = new OrderStore()
void warmStore.displayTotal // prime cache
forceGC()
const hB0 = heapMB()
const warmTimes = runTrials(() => {
void warmStore.displayTotal // all 5 getters cached
})
forceGC()
const hB1 = heapMB()

// ── Scenario C: Reads with 10% invalidation (dep changes every 10 reads) ──
const mixStore = new OrderStore()
let readCount = 0
forceGC()
const hC0 = heapMB()
const mixTimes = runTrials(() => {
if (readCount++ % 10 === 0) mixStore.price = 95 + (readCount % 10) // invalidate
void mixStore.displayTotal
})
forceGC()
const hC1 = heapMB()

const coldMed = median(coldTimes) * 1000 // µs
const warmMed = median(warmTimes) * 1000
const mixMed = median(mixTimes) * 1000

console.log(`${'Scenario'.padEnd(36)} ${'Median (µs)'.padStart(12)} ${'Stddev (µs)'.padStart(12)} ${'Speedup'.padStart(10)} ${'Heap Δ (MB)'.padStart(12)}`)
console.log('─'.repeat(84))
console.log(`${'A. Cold (new store, cache miss)'.padEnd(36)} ${coldMed.toFixed(2).padStart(12)} ${(stddev(coldTimes)*1000).toFixed(2).padStart(12)} ${'1.0x'.padStart(10)} ${(hA1-hA0).toFixed(3).padStart(12)}`)
console.log(`${'B. Warm (same store, cache hit)'.padEnd(36)} ${warmMed.toFixed(2).padStart(12)} ${(stddev(warmTimes)*1000).toFixed(2).padStart(12)} ${((coldMed/warmMed).toFixed(1)+'x').padStart(10)} ${(hB1-hB0).toFixed(3).padStart(12)}`)
console.log(`${'C. Mixed (10% invalidation)'.padEnd(36)} ${mixMed.toFixed(2).padStart(12)} ${(stddev(mixTimes)*1000).toFixed(2).padStart(12)} ${((coldMed/mixMed).toFixed(1)+'x').padStart(10)} ${(hC1-hC0).toFixed(3).padStart(12)}`)
console.log()
console.log('Cold: getter always recomputes (simulates pre-memoization behavior).')
console.log('Warm: cache hit — result returned from WeakMap without recomputation.')
console.log('Mixed: realistic workload — 90% cache hits, 10% dep-triggered recompute.')
console.log('Heap delta shows memoization overhead is minimal (one WeakMap entry per getter).\n')
88 changes: 88 additions & 0 deletions packages/gea/benchmarks/proxy-iterate.bench.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
/**
* Benchmark: proxyIterate cache — eliminate per-call proxy allocations
* PR #39: Cache array-element proxies in iterateProxyCache WeakMap
*
* Before: every store.array[i] access creates a new Proxy object
* After: proxy is reused from iterateProxyCache on second+ access
*
* Run: node --expose-gc --conditions source --import tsx/esm packages/gea/benchmarks/proxy-iterate.bench.ts
* Or: npx tsx --conditions source packages/gea/benchmarks/proxy-iterate.bench.ts
*/
import { Store } from '../src/lib/store.ts'

function forceGC() { if (typeof global.gc === 'function') { global.gc(); global.gc() } }
function heapMB() { return process.memoryUsage().heapUsed / 1024 / 1024 }
function median(arr: number[]) {
const s = [...arr].sort((a, b) => a - b)
const m = Math.floor(s.length / 2)
return s.length % 2 ? s[m] : (s[m - 1] + s[m]) / 2
}
function stddev(arr: number[]) {
const avg = arr.reduce((a, b) => a + b, 0) / arr.length
return Math.sqrt(arr.reduce((a, b) => a + (b - avg) ** 2, 0) / arr.length)
}

// ---------- Setup ----------
class BenchStore extends Store {
items = Array.from({ length: 500 }, (_, i) => ({ id: i, label: `item-${i}`, active: i % 2 === 0 }))
}

const ITEM_COUNT = 500
const WARMUP = 20
const TRIALS = 100

console.log('\n╔══ proxyIterate cache: no-cache vs cached proxy access ════════════════════╗')
console.log(`║ ${ITEM_COUNT} items, ${TRIALS} trials each ║`)
console.log('╚═══════════════════════════════════════════════════════════════════════════╝\n')

// ── Scenario A: No cache (fresh store each trial = cold proxy creation) ──
// Simulate pre-PR behavior: each trial creates a brand new store so the
// iterateProxyCache is empty — every element access allocates a new Proxy.
console.log('Scenario A — NO CACHE (fresh store per trial, all proxy allocations cold):')
forceGC()
const hA0 = heapMB()
const noCache: number[] = []
for (let t = 0; t < WARMUP + TRIALS; t++) {
const freshStore = new BenchStore() // empty cache each time
const t0 = performance.now()
freshStore.items.forEach((item) => void item)
const elapsed = performance.now() - t0
if (t >= WARMUP) noCache.push(elapsed)
}
forceGC()
const hA1 = heapMB()

// ── Scenario B: With cache (same store, warm iterateProxyCache) ──
// Post-PR behavior: proxy for each index is cached in iterateProxyCache.
console.log('Scenario B — WITH CACHE (same store, proxies reused from iterateProxyCache):')
const cachedStore = new BenchStore()
// Pre-warm the cache via forEach so iterateProxyCache is populated
cachedStore.items.forEach((item) => void item)

forceGC()
const hB0 = heapMB()
const withCache: number[] = []
for (let t = 0; t < WARMUP + TRIALS; t++) {
const t0 = performance.now()
cachedStore.items.forEach((item) => void item)
const elapsed = performance.now() - t0
if (t >= WARMUP) withCache.push(elapsed)
}
forceGC()
const hB1 = heapMB()

const noCacheMed = median(noCache)
const withCacheMed = median(withCache)
const speedup = noCacheMed / withCacheMed

console.log()
console.log(`${'Metric'.padEnd(34)} ${'No cache'.padStart(12)} ${'With cache'.padStart(12)}`)
console.log('─'.repeat(60))
console.log(`${'Median time / full iteration (µs)'.padEnd(34)} ${(noCacheMed * 1000).toFixed(1).padStart(12)} ${(withCacheMed * 1000).toFixed(1).padStart(12)}`)
console.log(`${'Stddev (µs)'.padEnd(34)} ${(stddev(noCache) * 1000).toFixed(1).padStart(12)} ${(stddev(withCache) * 1000).toFixed(1).padStart(12)}`)
console.log(`${'Heap delta vs baseline (MB)'.padEnd(34)} ${(hA1 - hA0).toFixed(3).padStart(12)} ${(hB1 - hB0).toFixed(3).padStart(12)}`)
console.log(`${'Speedup'.padEnd(34)} ${'1.0x (baseline)'.padStart(12)} ${(speedup.toFixed(1) + 'x').padStart(12)}`)
console.log()
console.log(`Array size: ${ITEM_COUNT} items | ${TRIALS} trials | Warm-up: ${WARMUP} iterations`)
console.log('No cache: new store per trial → iterateProxyCache empty → Proxy() called for every element.')
console.log('With cache: same store → iterateProxyCache hit → no allocation on warm access.\n')
113 changes: 113 additions & 0 deletions packages/gea/benchmarks/sort-permutation.bench.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
/**
* Benchmark: sort/reverse permutation O(n²) → O(n)
* PR #38: Replace indexOf-in-loop with Map-based bucket lookup
*
* Run: node --expose-gc --conditions source --import tsx/esm packages/gea/benchmarks/sort-permutation.bench.ts
* Or: npx tsx --conditions source packages/gea/benchmarks/sort-permutation.bench.ts
*/

// ---------- Statistical helpers ----------
function median(arr: number[]): number {
const s = [...arr].sort((a, b) => a - b)
const m = Math.floor(s.length / 2)
return s.length % 2 ? s[m] : (s[m - 1] + s[m]) / 2
}
function mean(arr: number[]): number { return arr.reduce((a, b) => a + b, 0) / arr.length }
function stddev(arr: number[]): number {
const m = mean(arr)
return Math.sqrt(arr.reduce((a, b) => a + (b - m) ** 2, 0) / arr.length)
}
function forceGC() { if (typeof global.gc === 'function') { global.gc(); global.gc() } }
function heapMB() { return process.memoryUsage().heapUsed / 1024 / 1024 }

// ---------- Old O(n²) — indexOf in loop ----------
function computePermutationOld(prev: any[], next: any[]): number[] {
const p = prev.slice()
return next.map((v) => {
const idx = p.indexOf(v)
p[idx] = undefined
return idx
})
}

// ---------- New O(n) — Map bucket ----------
function computePermutationNew(prev: any[], next: any[]): number[] {
const idxMap = new Map<any, number[]>()
for (let i = 0; i < prev.length; i++) {
const a = idxMap.get(prev[i])
a ? a.push(i) : idxMap.set(prev[i], [i])
}
const cursors = new Map<any, number>()
return next.map((v) => {
const bucket = idxMap.get(v)!
const cursor = cursors.get(v) ?? 0
cursors.set(v, cursor + 1)
return bucket[cursor]
})
}

// ---------- Benchmark harness ----------
function runTrials(fn: () => void, warmup: number, trials: number): number[] {
for (let i = 0; i < warmup; i++) fn()
return Array.from({ length: trials }, () => {
const t0 = performance.now()
fn()
return performance.now() - t0
})
}

function makeArray(size: number): number[] {
return Array.from({ length: size }, (_, i) => i)
}
function shuffle(arr: number[]): number[] {
const a = arr.slice()
for (let i = a.length - 1; i > 0; i--) {
const j = Math.floor(Math.random() * (i + 1))
;[a[i], a[j]] = [a[j], a[i]]
}
return a
}

const SIZES = [100, 500, 1000, 5000, 10000]
const WARMUP = 10
const TRIALS = 50

console.log('\n╔══ sort/reverse permutation: O(n²) vs O(n) ══════════════════════════════╗')
console.log(`║ ${TRIALS} trials per size, ${WARMUP} warm-up iterations ║`)
console.log('╚══════════════════════════════════════════════════════════════════════════╝\n')

console.log(
`${'n'.padEnd(7)} | ${'old med (ms)'.padStart(12)} | ${'new med (ms)'.padStart(12)} | ${'speedup'.padStart(9)} | ${'old σ'.padStart(8)} | ${'new σ'.padStart(8)}`
)
console.log('─'.repeat(74))

for (const size of SIZES) {
const base = makeArray(size)
const sorted = shuffle(base)

// Separate GC state before each impl
forceGC()
const hOld0 = heapMB()
const oldTimes = runTrials(() => computePermutationOld(base, sorted), WARMUP, TRIALS)
forceGC()
const hOld1 = heapMB()

forceGC()
const hNew0 = heapMB()
const newTimes = runTrials(() => computePermutationNew(base, sorted), WARMUP, TRIALS)
forceGC()
const hNew1 = heapMB()

const oldMed = median(oldTimes)
const newMed = median(newTimes)
const speedup = oldMed / newMed

console.log(
`${String(size).padEnd(7)} | ${oldMed.toFixed(3).padStart(12)} | ${newMed.toFixed(3).padStart(12)} | ${(speedup.toFixed(1) + 'x').padStart(9)} | ${stddev(oldTimes).toFixed(3).padStart(8)} | ${stddev(newTimes).toFixed(3).padStart(8)}`
)
}

console.log()
console.log('Methodology: 50 trials per config, median reported to suppress outliers.')
console.log('Old: Array.indexOf in loop = O(n) per element = O(n²) total.')
console.log('New: Map bucket pre-built in O(n), lookup in O(1) = O(n) total.\n')
Loading