//! Simple key interner for mapping external keys to compact handles.
//!
//! Assigns monotonically increasing `u64` handles to unique keys, enabling
//! fast lookups while avoiding repeated key cloning in hot paths.
//!
//! ## Architecture
//!
//! ```text
//! ┌────────────────────────────────────────────────────────────────────────────┐
//! │ KeyInterner Layout │
//! │ │
//! │ ┌─────────────────────────────────────────────────────────────────────┐ │
//! │ │ index: HashMap<K, u64, S> keys: Vec<K> │ │
//! │ │ │ │
//! │ │ ┌────────────────────────┐ ┌─────────────────────────┐ │ │
//! │ │ │ Key Handle │ │ Index Key │ │ │
//! │ │ ├────────────────────────┤ ├─────────────────────────┤ │ │
//! │ │ │ "user:123" → 0 │ │ 0 "user:123" │ │ │
//! │ │ │ "user:456" → 1 │ │ 1 "user:456" │ │ │
//! │ │ │ "session:a" → 2 │ │ 2 "session:a" │ │ │
//! │ │ └────────────────────────┘ └─────────────────────────┘ │ │
//! │ │ │ │
//! │ │ intern("user:123") ──► lookup in index ──► return 0 │ │
//! │ │ resolve(1) ──► keys[1] ──► "user:456" │ │
//! │ └─────────────────────────────────────────────────────────────────────┘ │
//! │ │
//! │ Data Flow │
//! │ ───────── │
//! │ intern(key): │
//! │ 1. Check index for existing handle │
//! │ 2. If found: return handle │
//! │ 3. If not: assign handle = keys.len(), store in both structures │
//! │ │
//! │ resolve(handle): │
//! │ 1. Direct index into keys vector: O(1) │
//! │ │
//! └────────────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Key Components
//!
//! - [`KeyInterner`]: Maps keys to compact `u64` handles
//!
//! ## Operations
//!
//! | Operation | Description | Complexity |
//! |-------------------------|----------------------------------------------|------------|
//! | `new` | Create an empty interner | O(1) |
//! | `with_capacity` | Create an empty interner with reserved space | O(1) |
//! | `with_hasher` | Create an empty interner with a custom hasher| O(1) |
//! | `intern` | Get or create handle for key | O(1) avg |
//! | `try_intern` | Fallible `intern`, respects `MAX_CAPACITY` | O(1) avg |
//! | `get_handle` | Lookup handle without inserting | O(1) avg |
//! | `get_handle_borrowed` | Lookup handle using a borrowed key form | O(1) avg |
//! | `resolve` | Convert handle back to key reference | O(1) |
//! | `len` | Return number of interned keys | O(1) |
//! | `is_empty` | Check whether any keys are interned | O(1) |
//! | `clear` | Remove all interned keys (bumps generation) | O(n) |
//! | `shrink_to_fit` | Shrink backing storage to fit length | O(n) |
//! | `clear_shrink` | Clear all keys and release spare capacity | O(n) |
//! | `approx_bytes` | Estimate memory footprint | O(1) |
//! | `generation` | Epoch counter bumped on every `clear` | O(1) |
//! | `iter` | Iterate over `(handle, key)` pairs | O(n) total |
//!
//! ## Use Cases
//!
//! - **Handle-based caches**: Avoid cloning large keys on every access
//! - **Frequency tracking**: Use compact handles as frequency map keys
//! - **Deduplication**: Ensure each unique key has exactly one handle
//!
//! ## Example Usage
//!
//! ```
//! use cachekit::ds::KeyInterner;
//!
//! let mut interner: KeyInterner<String> = KeyInterner::new();
//!
//! // Intern keys to get compact handles
//! let h1 = interner.intern(&"long_key_name_1".to_owned());
//! let _h2 = interner.intern(&"long_key_name_2".to_owned());
//!
//! // Same key returns same handle
//! let h1_again = interner.intern(&"long_key_name_1".to_owned());
//! assert_eq!(h1, h1_again);
//!
//! // Resolve handle back to key
//! assert_eq!(interner.resolve(h1).map(String::as_str), Some("long_key_name_1"));
//! ```
//!
//! ## Use Case: Handle-Based Cache
//!
//! ```
//! use cachekit::ds::KeyInterner;
//! use std::collections::HashMap;
//!
//! // External keys are strings, internal cache uses u64 handles
//! let mut interner = KeyInterner::new();
//! let mut cache: HashMap<u64, Vec<u8>> = HashMap::new();
//!
//! fn put(interner: &mut KeyInterner<String>, cache: &mut HashMap<u64, Vec<u8>>,
//! key: &str, value: Vec<u8>) {
//! let handle = interner.intern(&key.to_owned());
//! cache.insert(handle, value);
//! }
//!
//! fn get<'a>(interner: &KeyInterner<String>, cache: &'a HashMap<u64, Vec<u8>>,
//! key: &str) -> Option<&'a Vec<u8>> {
//! let handle = interner.get_handle_borrowed(key)?;
//! cache.get(&handle)
//! }
//!
//! put(&mut interner, &mut cache, "session:abc", vec![1, 2, 3]);
//! assert!(get(&interner, &cache, "session:abc").is_some());
//! ```
//!
//! ## Thread Safety
//!
//! `KeyInterner<K, S>` is `Send + Sync` when `K` and `S` are, but provides no
//! internal synchronization. For shared mutable access, wrap it in
//! `parking_lot::RwLock` or a similar synchronization primitive, as in the
//! sketch below.
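//!
//! A minimal sketch of shared access using std's `RwLock`; any equivalent
//! lock (such as `parking_lot::RwLock`) works the same way:
//!
//! ```
//! use cachekit::ds::KeyInterner;
//! use std::sync::RwLock;
//!
//! let shared = RwLock::new(KeyInterner::<String>::new());
//!
//! // Interning mutates, so writers take the write lock; resolving only
//! // needs the read lock.
//! let h = shared.write().unwrap().intern(&"k".to_owned());
//! assert_eq!(
//!     shared.read().unwrap().resolve(h).map(String::as_str),
//!     Some("k"),
//! );
//! ```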
//!
//! ## Security
//!
//! The default hasher is [`rustc_hash::FxBuildHasher`], chosen for speed on
//! trusted input. **`FxHash` is non-cryptographic and is not resistant to
//! hash-flooding / HashDoS attacks.** If a `KeyInterner` may observe keys
//! derived from untrusted input (for example, cache keys sourced from HTTP
//! URLs, user IDs, request parameters, or bearer tokens), callers should
//! either:
//!
//! - construct it with a DoS-resistant hasher via
//! [`KeyInterner::with_hasher`] /
//!   [`KeyInterner::with_capacity_and_hasher`] (for example
//!   [`std::collections::hash_map::RandomState`]; see the sketch after
//!   this list), or
//! - preprocess keys into a form the attacker cannot control the hash of.
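//!
//! A one-line sketch of the first option, using std's `RandomState`:
//!
//! ```
//! use cachekit::ds::KeyInterner;
//! use std::collections::hash_map::RandomState;
//!
//! // Randomised per-process seeds make hash values unpredictable to an
//! // attacker supplying keys.
//! let interner: KeyInterner<String, RandomState> =
//!     KeyInterner::with_hasher(RandomState::new());
//! assert!(interner.is_empty());
//! ```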
//!
//! `KeyInterner` is also **append-only**: keys are never removed except via
//! [`KeyInterner::clear`] / [`KeyInterner::clear_shrink`], so an attacker
//! who can trigger interning of unique keys can drive memory usage without
//! bound. Two mitigations are provided:
//!
//! 1. [`KeyInterner::MAX_CAPACITY`] caps the total number of unique keys.
//! [`KeyInterner::intern`] panics when the cap is reached;
//! [`KeyInterner::try_intern`] returns
//! [`InternerError::CapacityExceeded`] instead. When keys may come from
//!    untrusted input, always use `try_intern` and enforce a smaller
//!    admission-control bound of your own on top (see the sketch after
//!    this list).
//! 2. [`KeyInterner::try_with_capacity`] refuses oversized preallocations
//! rather than aborting the process with an allocator error.
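//!
//! A minimal sketch of the admission-control pattern from item 1; the
//! `MAX_KEYS` bound and `admit` helper are illustrative, not part of this
//! crate:
//!
//! ```
//! use cachekit::ds::KeyInterner;
//!
//! // Illustrative caller-side budget, far below `MAX_CAPACITY`.
//! const MAX_KEYS: usize = 10_000;
//!
//! fn admit(interner: &mut KeyInterner<String>, key: &str) -> Option<u64> {
//!     // Refuse *new* keys once the budget is spent, but keep returning
//!     // handles for keys that are already interned.
//!     if interner.len() >= MAX_KEYS
//!         && interner.get_handle_borrowed(key).is_none()
//!     {
//!         return None;
//!     }
//!     interner.try_intern(&key.to_owned()).ok()
//! }
//!
//! let mut interner = KeyInterner::new();
//! assert!(admit(&mut interner, "some_key").is_some());
//! ```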
//!
//! **Handles are not capability tokens.** They are plain sequential
//! `u64`s; there is no cryptographic or structural guarantee that prevents
//! a handle minted by one `KeyInterner` from accidentally resolving on
//! another. Two specific hazards:
//!
//! - *Cross-instance confusion.* A handle from `interner_a` used against
//! `interner_b` will silently resolve to whatever happens to live at
//! that index in `interner_b`, not to `None`.
//! - *Clear-cycle reuse.* After [`KeyInterner::clear`] /
//! [`KeyInterner::clear_shrink`], handles restart from `0`. A caller
//! who stored handles externally and then interns new keys will see
//! the stored handles silently resolve to unrelated keys. Use
//! [`KeyInterner::generation`] to detect staleness: capture
//! `generation()` when you store a handle, compare before using it,
//! and treat mismatches as "handle invalidated".
//!
//! If you are exposing handles across a trust boundary, wrap
//! `(generation, handle)` together and validate both before `resolve`.
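//!
//! A minimal sketch of that pattern; `StampedHandle` and `checked_resolve`
//! are illustrative names, not part of this crate:
//!
//! ```
//! use cachekit::ds::KeyInterner;
//!
//! struct StampedHandle {
//!     generation: u64,
//!     handle: u64,
//! }
//!
//! fn checked_resolve<'a>(
//!     interner: &'a KeyInterner<String>,
//!     stamped: &StampedHandle,
//! ) -> Option<&'a String> {
//!     // Reject handles minted before the most recent clear.
//!     if stamped.generation != interner.generation() {
//!         return None;
//!     }
//!     interner.resolve(stamped.handle)
//! }
//!
//! let mut interner = KeyInterner::new();
//! let stamped = StampedHandle {
//!     generation: interner.generation(),
//!     handle: interner.intern(&"k".to_owned()),
//! };
//! assert!(checked_resolve(&interner, &stamped).is_some());
//!
//! interner.clear(); // bumps generation, invalidating `stamped.handle`
//! interner.intern(&"other".to_owned());
//! assert!(checked_resolve(&interner, &stamped).is_none());
//! ```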
//!
//! `KeyInterner`'s [`std::fmt::Debug`] impl deliberately omits the
//! interned keys themselves and only reports length / capacity /
//! generation. Keys are frequently sensitive (URLs with query strings,
//! user IDs, auth material), and a single `eprintln!("{:?}", interner)`
//! would otherwise spill every interned key to logs. Use
//! [`KeyInterner::iter`] explicitly when you need the full contents.
//!
//! ## Implementation Notes
//!
//! - Handles are assigned monotonically starting at 0
//! - Keys are never removed (append-only design); use [`KeyInterner::clear`]
//! to drop the whole table and restart from handle `0`
//! - Both `index` and `keys` store copies of the key
//! - `K`'s `Hash`, `Eq`, and `Clone` impls must be mutually consistent:
//! `a == b` implies `hash(a) == hash(b)`, and `a.clone() == a`. Violating
//! this contract either leaks entries (one stored handle, another new
//! handle minted for the same key on next `intern`) or, for panicking
//! `Hash` / `Eq` impls during `HashMap::insert`, leaves the interner in
//! an internally consistent but reduced state (see
//! [`KeyInterner::intern`] for the full exception-safety contract).
use rustc_hash::FxBuildHasher;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::collections::TryReserveError;
use std::hash::{BuildHasher, Hash};
/// Error type returned by [`KeyInterner::try_intern`] and
/// [`KeyInterner::try_with_capacity`] /
/// [`KeyInterner::try_with_capacity_and_hasher`].
///
/// Exists to give callers a non-panicking path when keys or capacities come
/// from untrusted input. See the module-level [security
/// notes](crate::ds::interner#security).
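///
/// # Example
///
/// A sketch of handling both variants without panicking:
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<String> = KeyInterner::new();
/// match interner.try_intern(&"k".to_owned()) {
///     Ok(handle) => assert_eq!(handle, 0),
///     // `CapacityExceeded` or `AllocationFailed`; both format via `Display`.
///     Err(e) => eprintln!("interning rejected: {e}"),
/// }
/// ```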
#[derive(Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum InternerError {
/// The interner is already at [`KeyInterner::MAX_CAPACITY`] and cannot
/// accept another unique key.
CapacityExceeded,
/// The underlying allocator refused a growth request. Carries the
/// original [`TryReserveError`] so callers can inspect the cause.
AllocationFailed(TryReserveError),
}
impl std::fmt::Display for InternerError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::CapacityExceeded => write!(
f,
"KeyInterner is at MAX_CAPACITY; refusing to intern another unique key"
),
Self::AllocationFailed(e) => write!(f, "KeyInterner allocation failed: {e}"),
}
}
}
impl std::error::Error for InternerError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Self::AllocationFailed(e) => Some(e),
_ => None,
}
}
}
impl From<TryReserveError> for InternerError {
fn from(e: TryReserveError) -> Self {
Self::AllocationFailed(e)
}
}
/// Monotonic key interner that assigns a `u64` handle to each unique key.
///
/// Maps external keys to compact `u64` handles for efficient storage and lookup.
/// Handles are assigned sequentially starting from 0 and never reused **within
/// a single generation**; see [`KeyInterner::generation`] for detecting reuse
/// across [`KeyInterner::clear`] cycles.
///
/// # Type Parameters
///
/// - `K`: Key type, must be `Eq + Hash + Clone` for [`intern`](Self::intern)
/// - `S`: Hash builder; defaults to [`FxBuildHasher`]. Swap for a
/// DoS-resistant builder (e.g. [`std::collections::hash_map::RandomState`])
/// when keys may come from untrusted input — see the module-level
/// [security notes](self#security).
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
///
/// // Intern returns a handle
/// let handle = interner.intern(&"my_key");
/// assert_eq!(handle, 0); // First key gets handle 0
///
/// // Same key returns same handle
/// assert_eq!(interner.intern(&"my_key"), 0);
///
/// // Different key gets next handle
/// assert_eq!(interner.intern(&"other_key"), 1);
///
/// // Resolve handle back to key
/// assert_eq!(interner.resolve(0), Some(&"my_key"));
/// ```
///
/// # Use Case: Frequency Tracking
///
/// ```
/// use cachekit::ds::KeyInterner;
/// use std::collections::HashMap;
///
/// let mut interner = KeyInterner::new();
/// let mut freq: HashMap<u64, u32> = HashMap::new();
///
/// // Track access frequency using handles (cheaper than cloning keys)
/// fn access(interner: &mut KeyInterner<String>, freq: &mut HashMap<u64, u32>, key: &str) {
/// let handle = interner.intern(&key.to_owned());
/// *freq.entry(handle).or_insert(0) += 1;
/// }
///
/// access(&mut interner, &mut freq, "page_a");
/// access(&mut interner, &mut freq, "page_a");
/// access(&mut interner, &mut freq, "page_b");
///
/// let handle_a = interner.get_handle_borrowed("page_a").unwrap();
/// assert_eq!(freq[&handle_a], 2);
/// ```
pub struct KeyInterner<K, S = FxBuildHasher> {
index: HashMap<K, u64, S>,
keys: Vec<K>,
/// Monotonic epoch counter bumped every time [`clear`](Self::clear) /
/// [`clear_shrink`](Self::clear_shrink) invalidates existing handles.
/// Callers that hold handles across a clear can compare
/// [`generation`](Self::generation) before and after use to detect
/// the staleness described in the module-level security notes.
generation: u64,
}
// Manual `Debug` impl: only print aggregate state, not the interned keys
// themselves. Keys commonly contain sensitive data (URLs with query strings,
// user IDs, auth tokens); the derived `Debug` would dump every one of them
// on any `{:?}` or `panic!` formatting, which is an avoidable information-
// disclosure vector. See the module-level security notes.
//
// `finish_non_exhaustive` communicates to readers that internal storage is
// intentionally hidden rather than forgotten.
impl<K, S> std::fmt::Debug for KeyInterner<K, S> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("KeyInterner")
.field("len", &self.keys.len())
.field("capacity", &self.keys.capacity())
.field("index_capacity", &self.index.capacity())
.field("generation", &self.generation)
.finish_non_exhaustive()
}
}
impl<K, S> Clone for KeyInterner<K, S>
where
K: Clone + Eq + Hash,
S: BuildHasher + Clone,
{
fn clone(&self) -> Self {
// Clone with tight capacity (len, not source capacity) so a cleared-
// but-unshrunk interner doesn't propagate its oversized allocation
// through every subsequent `.clone()`. This is a defensive measure
// against memory-DoS amplification when clones fan out.
let len = self.keys.len();
let hasher = self.index.hasher().clone();
let mut new_index = HashMap::with_capacity_and_hasher(len, hasher);
for (k, &v) in self.index.iter() {
new_index.insert(k.clone(), v);
}
let new_keys = self.keys.clone();
Self {
index: new_index,
keys: new_keys,
generation: self.generation,
}
}
}
impl<K, S> Default for KeyInterner<K, S>
where
S: Default,
{
fn default() -> Self {
Self {
index: HashMap::with_hasher(S::default()),
keys: Vec::new(),
generation: 0,
}
}
}
impl<K> KeyInterner<K, FxBuildHasher> {
/// Creates an empty interner with the default [`FxBuildHasher`].
///
/// # Security
///
/// The default hasher is **not** DoS-resistant. If keys may be
/// attacker-controlled, prefer [`KeyInterner::with_hasher`] with a
/// randomised builder such as
/// [`std::collections::hash_map::RandomState`]. See the module-level
/// [security notes](self#security).
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let interner: KeyInterner<String> = KeyInterner::new();
/// assert!(interner.is_empty());
/// ```
#[must_use]
pub fn new() -> Self {
Self::default()
}
/// Creates an interner with pre-allocated capacity, using the default
/// [`FxBuildHasher`].
///
/// The requested `capacity` is silently clamped to
/// [`KeyInterner::MAX_CAPACITY`]; even after the clamp, the allocator
/// may refuse the reservation without aborting. This keeps
/// configuration-derived capacities from crashing the process.
/// Use [`try_with_capacity`](Self::try_with_capacity) to observe
/// both the clamp and the allocator failure explicitly.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let interner: KeyInterner<String> = KeyInterner::with_capacity(1000);
/// assert!(interner.is_empty());
/// ```
#[must_use]
pub fn with_capacity(capacity: usize) -> Self
where
K: Eq + Hash,
{
Self::with_capacity_and_hasher(capacity, FxBuildHasher)
}
/// Fallible version of [`with_capacity`](Self::with_capacity): returns an
/// error instead of clamping if `capacity > MAX_CAPACITY`, and
/// surfaces allocator failures rather than aborting.
///
/// # Errors
///
/// - [`InternerError::CapacityExceeded`] if `capacity > MAX_CAPACITY`.
/// - [`InternerError::AllocationFailed`] if the underlying allocator
/// refuses the reservation.
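///
/// # Example
///
/// A quick check of both paths:
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// // A reasonable capacity succeeds...
/// let interner = KeyInterner::<String>::try_with_capacity(16).unwrap();
/// assert!(interner.is_empty());
///
/// // ...while an oversized request is rejected instead of clamped.
/// assert!(KeyInterner::<String>::try_with_capacity(usize::MAX).is_err());
/// ```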
pub fn try_with_capacity(capacity: usize) -> Result<Self, InternerError>
where
K: Eq + Hash,
{
Self::try_with_capacity_and_hasher(capacity, FxBuildHasher)
}
}
impl<K, S> KeyInterner<K, S> {
/// Maximum number of unique keys a single `KeyInterner` will hold.
///
/// Chosen to bound per-instance memory at a ceiling no legitimate
/// workload should approach, while leaving several orders of magnitude
/// of headroom over realistic cache-key cardinalities. Even at the cap,
/// the interner
/// occupies at least `MAX_CAPACITY * (size_of::<(K, u64)>() + size_of::<K>())`
/// bytes — for `K = String` the hidden heap-allocated payloads dominate
/// this figure, so callers exposing `KeyInterner` to untrusted input
/// should impose a much smaller admission-control cap of their own.
///
/// Derived from `isize::MAX as usize / 64` to stay well below the
/// allocation limit on the target platform.
pub const MAX_CAPACITY: usize = (isize::MAX as usize) / 64;
/// Creates an empty interner with the given hash builder.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
/// use std::collections::hash_map::RandomState;
///
/// // DoS-resistant hasher for untrusted keys.
/// let interner: KeyInterner<String, RandomState> =
/// KeyInterner::with_hasher(RandomState::new());
/// assert!(interner.is_empty());
/// ```
#[must_use]
pub fn with_hasher(hash_builder: S) -> Self {
Self {
index: HashMap::with_hasher(hash_builder),
keys: Vec::new(),
generation: 0,
}
}
/// Creates an interner with pre-allocated capacity and a custom hash builder.
///
/// `capacity` is silently clamped to [`KeyInterner::MAX_CAPACITY`],
/// and any further allocator refusal is absorbed rather than aborting
/// the process. Use
/// [`try_with_capacity_and_hasher`](Self::try_with_capacity_and_hasher)
/// to observe the clamp or allocator failure.
#[must_use]
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self
where
K: Eq + Hash,
S: BuildHasher,
{
// Clamp + best-effort reserve: `with_capacity` is the infallible
// constructor, so it must never abort the process on an oversized
// request. We first clamp to `MAX_CAPACITY` (which defends against
// pathological requests up to `usize::MAX`), then use `try_reserve` so the
// allocator is free to refuse any remaining too-large request
// without aborting. Callers who need to observe that path
// should use `try_with_capacity_and_hasher` instead.
let clamped = capacity.min(Self::MAX_CAPACITY);
let mut keys: Vec<K> = Vec::new();
let mut index: HashMap<K, u64, S> = HashMap::with_hasher(hash_builder);
let _ = keys.try_reserve(clamped);
let _ = index.try_reserve(clamped);
Self {
index,
keys,
generation: 0,
}
}
/// Fallible version of
/// [`with_capacity_and_hasher`](Self::with_capacity_and_hasher).
///
/// # Errors
///
/// - [`InternerError::CapacityExceeded`] if `capacity > MAX_CAPACITY`.
/// - [`InternerError::AllocationFailed`] if the allocator refuses.
pub fn try_with_capacity_and_hasher(
capacity: usize,
hash_builder: S,
) -> Result<Self, InternerError>
where
K: Eq + Hash,
S: BuildHasher,
{
if capacity > Self::MAX_CAPACITY {
return Err(InternerError::CapacityExceeded);
}
let mut keys: Vec<K> = Vec::new();
keys.try_reserve(capacity)?;
let mut index: HashMap<K, u64, S> = HashMap::with_hasher(hash_builder);
index.try_reserve(capacity)?;
Ok(Self {
index,
keys,
generation: 0,
})
}
/// Returns a reference to the hash builder used by this interner.
#[must_use]
pub fn hasher(&self) -> &S {
self.index.hasher()
}
/// Returns the current generation counter.
///
/// The counter starts at `0` and is incremented on every
/// [`clear`](Self::clear) / [`clear_shrink`](Self::clear_shrink). Callers
/// that persist handles across a possible clear can store
/// `(generation, handle)` pairs and reject handles whose recorded
/// generation no longer matches the live value — this is the documented
/// mitigation for the clear-cycle handle-reuse hazard described in the
/// module-level [security notes](self#security).
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<String> = KeyInterner::new();
/// let gen_before = interner.generation();
/// let h = interner.intern(&"k".to_owned());
///
/// // Later, after a clear_shrink elsewhere:
/// interner.clear_shrink();
/// let gen_after = interner.generation();
/// assert_ne!(gen_before, gen_after);
///
/// // The stored handle is stale.
/// let stored = (gen_before, h);
/// assert_ne!(stored.0, gen_after);
/// ```
#[must_use]
pub fn generation(&self) -> u64 {
self.generation
}
}
impl<K, S> KeyInterner<K, S>
where
K: Eq + Hash + Clone,
S: BuildHasher,
{
/// Returns the handle for `key`, inserting it if missing.
///
/// If the key is already interned, returns the existing handle.
/// Otherwise, assigns the next sequential handle and stores the key.
///
/// # Panics
///
/// Panics if the interner is already holding [`MAX_CAPACITY`]
/// unique keys, or if the allocator refuses to grow the backing
/// storage. Use [`try_intern`](Self::try_intern) for a non-panicking
/// variant — callers that process untrusted keys should always prefer
/// `try_intern`, since this method is a trivial DoS vector otherwise.
///
/// [`MAX_CAPACITY`]: Self::MAX_CAPACITY
///
/// # Exception safety
///
/// `intern` is designed so that, assuming `K`'s `Hash` / `Eq` / `Clone`
/// impls do not panic, a panic from `HashMap::insert` cannot leave the
/// interner with `len(keys) > len(index)` — which would otherwise
/// cause a subsequent `intern(&same_key)` to mint a *second* handle
/// for the same key and permanently strand the first one. In detail:
///
/// 1. Capacity is reserved via `try_reserve` up front, so neither
/// the `Vec::push` nor the `HashMap::insert` below can fail for
/// allocator reasons.
/// 2. The key is then inserted into `index` **first**. If `K::hash`
/// or `K::eq` panics (a pathological impl), the `keys` vector is
/// untouched, so the interner retains its invariant.
/// 3. Only after the insert returns do we push onto `keys`.
///
/// Panicking `Clone` / `Drop` impls for `K` can still violate the
/// invariant; the standard-library `HashMap` makes no stronger
/// guarantee either.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
///
/// // First key gets handle 0
/// let h1 = interner.intern(&"key_a");
/// assert_eq!(h1, 0);
///
/// // Second key gets handle 1
/// let h2 = interner.intern(&"key_b");
/// assert_eq!(h2, 1);
///
/// // Same key returns same handle (no new entry)
/// let h1_again = interner.intern(&"key_a");
/// assert_eq!(h1_again, 0);
/// assert_eq!(interner.len(), 2); // Still only 2 keys
/// ```
#[track_caller]
pub fn intern(&mut self, key: &K) -> u64 {
match self.try_intern(key) {
Ok(id) => id,
Err(InternerError::CapacityExceeded) => panic!(
"KeyInterner::intern: reached MAX_CAPACITY ({}); use try_intern to handle this gracefully",
Self::MAX_CAPACITY
),
Err(InternerError::AllocationFailed(e)) => {
panic!("KeyInterner::intern: allocation failed: {e}")
},
}
}
/// Fallible counterpart to [`intern`](Self::intern).
///
/// Returns the existing handle if `key` has already been interned;
/// otherwise assigns the next sequential handle and stores the key.
/// Returns [`InternerError::CapacityExceeded`] if the interner is
/// already at [`MAX_CAPACITY`], and [`InternerError::AllocationFailed`]
/// if the allocator refuses growth. **Preferred over `intern` whenever
/// keys can come from untrusted input.**
///
/// [`MAX_CAPACITY`]: Self::MAX_CAPACITY
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<String> = KeyInterner::new();
/// let h = interner.try_intern(&"k".to_owned()).unwrap();
/// assert_eq!(h, 0);
/// // Idempotent just like `intern`.
/// assert_eq!(interner.try_intern(&"k".to_owned()).unwrap(), h);
/// ```
pub fn try_intern(&mut self, key: &K) -> Result<u64, InternerError> {
if let Some(&id) = self.index.get(key) {
return Ok(id);
}
if self.keys.len() >= Self::MAX_CAPACITY {
return Err(InternerError::CapacityExceeded);
}
// Reserve up front so neither push nor insert can fail for
// allocator reasons below. This is the load-bearing precondition
// for the exception-safety argument documented on `intern`.
self.keys.try_reserve(1)?;
self.index.try_reserve(1)?;
let id = self.keys.len() as u64;
// Clone twice up front so any panic from `K::clone` happens
// before we mutate either container.
let k_for_index = key.clone();
let k_for_keys = key.clone();
// Insert into the index FIRST. If `K::hash` / `K::eq` panics
// here, the vector is untouched and the interner's
// len(keys) == len(index) invariant holds.
self.index.insert(k_for_index, id);
// Now push. After `try_reserve`, this is infallible for the
// `Vec` itself; `Drop` of the pushed value cannot run on push,
// so no user code executes between these two statements.
self.keys.push(k_for_keys);
Ok(id)
}
}
impl<K, S> KeyInterner<K, S>
where
K: Eq + Hash,
S: BuildHasher,
{
/// Returns the handle for `key` if it exists.
///
/// Does not insert the key if missing.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// let handle = interner.intern(&"existing");
///
/// assert_eq!(interner.get_handle(&"existing"), Some(handle));
/// assert_eq!(interner.get_handle(&"missing"), None);
/// ```
#[must_use]
pub fn get_handle(&self, key: &K) -> Option<u64> {
self.get_handle_borrowed(key)
}
/// Returns the handle for a borrowed form of `K` if it exists.
///
/// This enables allocation-free lookups for owned key types like `String`
/// by querying with `&str`.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<String> = KeyInterner::new();
/// interner.intern(&"hello".to_string());
///
/// // Lookup by &str without allocating a String
/// assert_eq!(interner.get_handle_borrowed("hello"), Some(0));
/// assert_eq!(interner.get_handle_borrowed("missing"), None);
/// ```
#[must_use]
pub fn get_handle_borrowed<Q>(&self, key: &Q) -> Option<u64>
where
K: Borrow<Q>,
Q: Eq + Hash + ?Sized,
{
self.index.get(key).copied()
}
/// Shrinks internal storage to fit current length.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// for i in 0..100u32 {
/// interner.intern(&i);
/// }
/// interner.clear();
/// interner.shrink_to_fit();
/// ```
pub fn shrink_to_fit(&mut self) {
self.index.shrink_to_fit();
self.keys.shrink_to_fit();
}
/// Clears all interned keys and shrinks internal storage.
///
/// After calling this, **all previously returned handles become
/// invalid** and the [`generation`](Self::generation) counter is
/// bumped. Callers holding handles across the clear should compare
/// `generation()` to detect staleness — see the module-level
/// [security notes](self#security).
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// let handle = interner.intern(&"key");
/// assert_eq!(interner.resolve(handle), Some(&"key"));
///
/// interner.clear_shrink();
/// assert!(interner.is_empty());
/// assert_eq!(interner.resolve(handle), None); // Handle now invalid
/// ```
pub fn clear_shrink(&mut self) {
self.clear();
self.shrink_to_fit();
}
}
impl<K, S> KeyInterner<K, S> {
/// Resolves a handle to its original key.
///
/// Returns `None` if the handle is out of bounds.
///
/// **Note:** handles are not capability-safe across
/// [`clear`](Self::clear) cycles or across distinct `KeyInterner`
/// instances. See the module-level [security notes](self#security).
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// let handle = interner.intern(&"my_key");
///
/// assert_eq!(interner.resolve(handle), Some(&"my_key"));
/// assert_eq!(interner.resolve(999), None); // Invalid handle
/// ```
#[must_use]
pub fn resolve(&self, handle: u64) -> Option<&K> {
let index = usize::try_from(handle).ok()?;
self.keys.get(index)
}
/// Returns the number of interned keys.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// assert_eq!(interner.len(), 0);
///
/// interner.intern(&"a");
/// interner.intern(&"b");
/// assert_eq!(interner.len(), 2);
///
/// // Re-interning same key doesn't increase count
/// interner.intern(&"a");
/// assert_eq!(interner.len(), 2);
/// ```
#[must_use]
pub fn len(&self) -> usize {
self.keys.len()
}
/// Returns `true` if no keys are interned.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<&str> = KeyInterner::new();
/// assert!(interner.is_empty());
///
/// interner.intern(&"key");
/// assert!(!interner.is_empty());
/// ```
#[must_use]
pub fn is_empty(&self) -> bool {
self.keys.is_empty()
}
/// Clears all interned keys and bumps [`generation`](Self::generation).
///
/// After calling this, all previously returned handles become invalid.
/// The internal allocations are retained; use
/// [`clear_shrink`](Self::clear_shrink) to also release spare capacity.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// interner.intern(&"key");
/// assert!(!interner.is_empty());
///
/// let gen_before = interner.generation();
/// interner.clear();
/// assert!(interner.is_empty());
/// assert_ne!(gen_before, interner.generation());
/// ```
pub fn clear(&mut self) {
self.index.clear();
self.keys.clear();
// Use wrapping_add defensively: a caller who clears 2^64 times
// has bigger problems than generation-counter rollover, and
// wrapping keeps even builds with overflow-checks enabled from
// panicking here.
self.generation = self.generation.wrapping_add(1);
}
/// Returns an approximate memory footprint in bytes.
///
/// Uses saturating arithmetic internally so that pathological
/// capacities cannot under-report the footprint via `usize` overflow.
/// Under-reporting here would let an attacker bypass any admission-
/// control check that consults `approx_bytes`.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner: KeyInterner<String> = KeyInterner::new();
/// let base_bytes = interner.approx_bytes();
///
/// // Add some keys
/// for i in 0..100 {
/// interner.intern(&format!("key_{}", i));
/// }
///
/// assert!(interner.approx_bytes() > base_bytes);
/// ```
#[must_use]
pub fn approx_bytes(&self) -> usize {
let entry_size = std::mem::size_of::<(K, u64)>();
let key_size = std::mem::size_of::<K>();
let index_bytes = self.index.capacity().saturating_mul(entry_size);
let keys_bytes = self.keys.capacity().saturating_mul(key_size);
std::mem::size_of::<Self>()
.saturating_add(index_bytes)
.saturating_add(keys_bytes)
}
/// Returns an iterator over (handle, key) pairs in insertion order.
///
/// # Example
///
/// ```
/// use cachekit::ds::KeyInterner;
///
/// let mut interner = KeyInterner::new();
/// interner.intern(&"a");
/// interner.intern(&"b");
///
/// let pairs: Vec<_> = interner.iter().collect();
/// assert_eq!(pairs, vec![(0, &"a"), (1, &"b")]);
/// ```
pub fn iter(&self) -> impl Iterator<Item = (u64, &K)> + '_ {
self.keys.iter().enumerate().map(|(i, k)| (i as u64, k))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn key_interner_basic_flow() {
let mut interner: KeyInterner<String> = KeyInterner::new();
assert!(interner.is_empty());
let a = interner.intern(&"a".to_owned());
let b = interner.intern(&"b".to_owned());
let a2 = interner.intern(&"a".to_owned());
assert_eq!(a, a2);
assert_ne!(a, b);
assert_eq!(interner.len(), 2);
assert_eq!(interner.get_handle_borrowed("b"), Some(b));
assert_eq!(interner.resolve(a).map(String::as_str), Some("a"));
}
#[test]
fn key_interner_iter() {
let mut interner: KeyInterner<String> = KeyInterner::new();
interner.intern(&"x".to_owned());
interner.intern(&"y".to_owned());
let mut pairs = interner.iter();
assert_eq!(pairs.next(), Some((0, &"x".to_owned())));
assert_eq!(pairs.next(), Some((1, &"y".to_owned())));
assert_eq!(pairs.next(), None);
}
// =========================================================================
// Security hardening tests
// =========================================================================
#[test]
fn debug_impl_does_not_leak_keys() {
// Guard: the manual `Debug` impl must not dump interned keys.
// Regressing this is an information-disclosure bug.
let mut interner: KeyInterner<String> = KeyInterner::new();
interner.intern(&"sensitive_token_abc123".to_owned());
interner.intern(&"another_secret".to_owned());
let formatted = format!("{interner:?}");
assert!(
!formatted.contains("sensitive_token_abc123"),
"Debug must not include key contents; got: {formatted}"
);
assert!(
!formatted.contains("another_secret"),
"Debug must not include key contents; got: {formatted}"
);
// But it should still be useful for diagnostics.
assert!(formatted.contains("len"), "Debug missing len: {formatted}");
assert!(
formatted.contains("generation"),