-
Notifications
You must be signed in to change notification settings - Fork 72
Expand file tree
/
Copy pathmetrics_example.py
More file actions
231 lines (184 loc) · 7.28 KB
/
metrics_example.py
File metadata and controls
231 lines (184 loc) · 7.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
"""Demonstration of cachier's metrics and observability features."""
import time
from datetime import timedelta
from cachier import cachier
def demo_basic_metrics_tracking():
    """Walk through hit/miss accounting on a memory-backed cached function."""
    divider = "=" * 60
    print(divider)
    print("Example 1: Basic Metrics Tracking")
    print(divider)

    @cachier(backend="memory", enable_metrics=True)
    def expensive_operation(x):
        """Simulate an expensive computation."""
        time.sleep(0.1)  # Simulate work
        return x**2

    # Start from a clean cache so the hit/miss sequence is predictable.
    expensive_operation.clear_cache()

    def _print_stats():
        # Fetch and display the current metrics snapshot; return it so the
        # caller can print extra fields when needed.
        snap = expensive_operation.metrics.get_stats()
        print(f" Hits: {snap.hits}, Misses: {snap.misses}")
        print(f" Hit rate: {snap.hit_rate:.1f}%")
        print(f" Avg latency: {snap.avg_latency_ms:.2f}ms")
        return snap

    # First call - cache miss
    print("\nFirst call (cache miss):")
    print(f" Result: {expensive_operation(5)}")
    _print_stats()

    # Second call - cache hit
    print("\nSecond call (cache hit):")
    print(f" Result: {expensive_operation(5)}")
    _print_stats()

    # Third call with different argument - cache miss
    print("\nThird call with different argument (cache miss):")
    print(f" Result: {expensive_operation(10)}")
    final_snap = _print_stats()
    print(f" Total calls: {final_snap.total_calls}")
def demo_stale_cache_tracking():
    """Show how stale-entry accesses and recalculations are counted."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 2: Stale Cache Tracking")
    print(divider)

    @cachier(
        backend="memory",
        enable_metrics=True,
        stale_after=timedelta(seconds=1),
        next_time=False,
    )
    def time_sensitive_operation(x):
        """Operation with stale_after configured."""
        return x * 2

    time_sensitive_operation.clear_cache()

    # Two calls back-to-back: a miss, then a fresh hit.
    for header in ("\nInitial call:", "\nCall while fresh (within 1 second):"):
        print(header)
        print(f" Result: {time_sensitive_operation(5)}")

    # Sleep past stale_after so the next access counts as a stale hit.
    print("\nWaiting for cache to become stale...")
    time.sleep(1.5)

    print("Call after cache is stale:")
    print(f" Result: {time_sensitive_operation(5)}")

    snap = time_sensitive_operation.metrics.get_stats()
    print("\nMetrics after stale access:")
    print(f" Hits: {snap.hits}")
    print(f" Stale hits: {snap.stale_hits}")
    print(f" Recalculations: {snap.recalculations}")
def demo_metrics_sampling():
    """Show how sampling records only a fraction of calls to cut overhead."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 3: Metrics Sampling (50% sampling rate)")
    print(divider)

    @cachier(
        backend="memory",
        enable_metrics=True,
        metrics_sampling_rate=0.5,  # Only sample 50% of calls
    )
    def sampled_operation(x):
        """Operation with reduced metrics sampling."""
        return x + 1

    sampled_operation.clear_cache()

    # Drive traffic: 100 calls cycling through 10 distinct arguments.
    print("\nMaking 100 calls with 10 unique arguments...")
    for call_index in range(100):
        sampled_operation(call_index % 10)

    snap = sampled_operation.metrics.get_stats()
    print("\nMetrics (with 50% sampling):")
    print(f" Total calls recorded: {snap.total_calls}")
    print(f" Hits: {snap.hits}")
    print(f" Misses: {snap.misses}")
    print(f" Hit rate: {snap.hit_rate:.1f}%")
    print(" Note: Total calls < 100 due to sampling; hit rate is approximately representative of overall behavior.")
def demo_comprehensive_metrics():
    """Exercise every metric counter and print one full snapshot."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 4: Comprehensive Metrics Snapshot")
    print(divider)

    @cachier(backend="memory", enable_metrics=True, entry_size_limit="1KB")
    def comprehensive_operation(x):
        """Operation to demonstrate all metrics."""
        if x > 1000:
            # Return large data to trigger size limit rejection
            return "x" * 2000
        return x * 2

    comprehensive_operation.clear_cache()

    # Event sequence: miss+recalc, hit, miss+recalc, size-limit rejection.
    for arg in (5, 5, 10, 2000):
        comprehensive_operation(arg)

    snap = comprehensive_operation.metrics.get_stats()
    print(
        f"\nComplete metrics snapshot:\n"
        f" Hits: {snap.hits}\n"
        f" Misses: {snap.misses}\n"
        f" Hit rate: {snap.hit_rate:.1f}%\n"
        f" Total calls: {snap.total_calls}\n"
        f" Avg latency: {snap.avg_latency_ms:.2f}ms\n"
        f" Stale hits: {snap.stale_hits}\n"
        f" Recalculations: {snap.recalculations}\n"
        f" Wait timeouts: {snap.wait_timeouts}\n"
        f" Size limit rejections: {snap.size_limit_rejections}\n"
        f" Entry count: {snap.entry_count}\n"
        f" Total size (bytes): {snap.total_size_bytes}"
    )
def demo_programmatic_monitoring():
    """Use the metrics API to run an automated cache-health check."""
    divider = "=" * 60
    print("\n" + divider)
    print("Example 5: Programmatic Monitoring")
    print(divider)

    @cachier(backend="memory", enable_metrics=True)
    def monitored_operation(x):
        """Operation being monitored."""
        return x**3

    monitored_operation.clear_cache()

    def check_cache_health(func, threshold=80.0):
        """Return (is_healthy, message) comparing hit rate to *threshold*."""
        stats = func.metrics.get_stats()
        if stats.total_calls == 0:
            return True, "No calls yet"
        if stats.hit_rate >= threshold:
            return True, f"Hit rate {stats.hit_rate:.1f}% meets threshold"
        return (
            False,
            f"Hit rate {stats.hit_rate:.1f}% below threshold {threshold}%",
        )

    # Simulate some usage: 20 calls over 5 distinct arguments.
    print("\nSimulating cache usage...")
    for step in range(20):
        monitored_operation(step % 5)

    # Evaluate health against a 70% hit-rate threshold.
    is_healthy, message = check_cache_health(monitored_operation, threshold=70.0)
    status_label = 'OK HEALTHY' if is_healthy else 'UNHEALTHY'
    print("\nCache health check:")
    print(f" Status: {status_label}")
    print(f" {message}")

    snap = monitored_operation.metrics.get_stats()
    print(f" Details: {snap.hits} hits, {snap.misses} misses")
def main():
    """Run every metrics demonstration in order, then print a summary."""
    # Run the demos via a dispatch tuple so the ordering is explicit.
    demos = (
        demo_basic_metrics_tracking,
        demo_stale_cache_tracking,
        demo_metrics_sampling,
        demo_comprehensive_metrics,
        demo_programmatic_monitoring,
    )
    for demo in demos:
        demo()

    divider = "=" * 60
    print("\n" + divider)
    print("Examples complete!")
    print(divider)
    print("\nKey takeaways:")
    for takeaway in (
        " - Metrics are opt-in via enable_metrics=True",
        " - Access metrics via function.metrics.get_stats()",
        " - Sampling reduces overhead for high-traffic functions",
        " - Metrics are thread-safe and backend-agnostic",
        " - Use for production monitoring and optimization",
    ):
        print(takeaway)
# Script entry point: run all demos when executed directly.
if __name__ == "__main__":
    main()