
Commit 4e88b42

Towards heap walk (#569)
* Implement tracking full slabs and large allocations

  This adds an additional SeqSet that is used to track all the fully used
  slabs and large allocations. This gives more chances to detect memory
  leaks, and additionally catches some more UAF failures where the object
  is not recycled.

* Make slabmeta track a slab interior pointer

  Use the head of the free list builder to track an interior pointer to the
  slab. This is unused unless the list contains something. Hence, we can use
  this to represent an interior pointer to the slab and report more accurate
  leaks.

* clangformat
* clangtidy
* clangtidy
* Clang tidy again.
* Fixing provenance.
* Clangformat
* Clang tidy.
* Add assert for sanity
* Make reinterpret_cast more descriptive. Add an operation to get a tag-free
  pointer from an address_t, and use it
* Clangformat
* CR
* Fix calculation of number of allocations.
* Fix calculation of number of allocations.
* Fix test
1 parent 704843d commit 4e88b42
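
The first two commit bullets describe the core mechanism: an intrusive sequence set (the "laden" set) holds every slab whose free list has drained, so a debug check or future heap walk can still visit allocations that no longer appear on any free list. The following is a minimal standalone sketch in the spirit of snmalloc's SeqSet (src/snmalloc/ds_core/seqset.h); the real implementation differs in detail and all names here are illustrative only:

#include <cstddef>

// Intrusive doubly-linked "sequence set": the node lives inside the
// element, so insert and remove are O(1) and allocation-free.
struct SlabMeta
{
  struct Node
  {
    Node* next;
    Node* prev;

    // Unlink this node from whichever list it is on.
    void remove()
    {
      next->prev = prev;
      prev->next = next;
    }
  } node;

  size_t object_size{0};
};

class SlabSet
{
  // Sentinel node: the set is empty when the sentinel links to itself.
  SlabMeta::Node head;

public:
  SlabSet()
  {
    head.next = &head;
    head.prev = &head;
  }

  bool is_empty() const
  {
    return head.next == &head;
  }

  void insert(SlabMeta* m)
  {
    m->node.next = head.next;
    m->node.prev = &head;
    head.next->prev = &m->node;
    head.next = &m->node;
  }

  // Visit every tracked slab, e.g. to report leaks during a debug
  // emptiness check or a heap walk.
  template<typename F>
  void iterate(F f)
  {
    for (auto* n = head.next; n != &head; n = n->next)
      f(reinterpret_cast<SlabMeta*>(n)); // valid: node is the first member
  }
};

In the commit below, the allocator inserts a slab's metadata into laden when its free list drains and calls node.remove() when an object comes back, so debug_is_empty only has to ask whether the set is empty.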

8 files changed

Lines changed: 199 additions & 51 deletions


src/snmalloc/aal/address.h

Lines changed: 10 additions & 0 deletions
@@ -290,4 +290,14 @@ namespace snmalloc
     return static_cast<size_t>(a - pointer_align_down<alignment>(a));
   }

+  /**
+   * Convert an address_t to a pointer. The returned pointer should never be
+   * followed. On CHERI following this pointer will result in a capability
+   * violation.
+   */
+  template<typename T>
+  SNMALLOC_FAST_PATH_INLINE T* useless_ptr_from_addr(address_t p)
+  {
+    return reinterpret_cast<T*>(static_cast<uintptr_t>(p));
+  }
 } // namespace snmalloc
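
A standalone illustration of how such a pointer is meant to be used, assuming address_t is an integer type as in snmalloc's non-CHERI builds; Obj and the address value are made up:

#include <cstdint>

using address_t = uintptr_t;

template<typename T>
T* useless_ptr_from_addr(address_t p)
{
  return reinterpret_cast<T*>(static_cast<uintptr_t>(p));
}

struct Obj
{
  int x;
};

int main()
{
  address_t a = 0x1000; // an address we only want to remember
  Obj* p = useless_ptr_from_addr<Obj>(a);
  // Fine: treat p as an opaque, address-sized token.
  address_t round_trip = reinterpret_cast<address_t>(p);
  // Not fine: p->x. The pointer must never be followed; on CHERI it
  // carries no valid capability, so dereferencing it would trap.
  return round_trip == a ? 0 : 1;
}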

src/snmalloc/ds_core/seqset.h

Lines changed: 6 additions & 6 deletions
@@ -83,6 +83,12 @@ namespace snmalloc
 #endif
     }

+  public:
+    /**
+     * Empty queue
+     */
+    constexpr SeqSet() = default;
+
     /**
      * Check for empty
      */
@@ -95,12 +101,6 @@ namespace snmalloc
       return head.next == &head;
     }

-  public:
-    /**
-     * Empty queue
-     */
-    constexpr SeqSet() = default;
-
     /**
      * Remove an element from the queue
      *

src/snmalloc/mem/corealloc.h

Lines changed: 61 additions & 14 deletions
@@ -61,6 +61,12 @@ namespace snmalloc
       uint16_t length = 0;
     } alloc_classes[NUM_SMALL_SIZECLASSES]{};

+    /**
+     * The set of all slabs and large allocations
+     * from this allocator that are full or almost full.
+     */
+    SeqSet<BackendSlabMetadata> laden{};
+
     /**
      * Local entropy source and current version of keys for
      * this thread
@@ -420,6 +426,9 @@ namespace snmalloc
       UNUSED(size);
 #endif

+      // Remove from set of fully used slabs.
+      meta->node.remove();
+
       Config::Backend::dealloc_chunk(
         get_backend_local_state(), *meta, p, size);

@@ -436,6 +445,9 @@ namespace snmalloc
       // Wake slab up.
       meta->set_not_sleeping(sizeclass);

+      // Remove from set of fully used slabs.
+      meta->node.remove();
+
       alloc_classes[sizeclass].available.insert(meta);
       alloc_classes[sizeclass].length++;

@@ -744,6 +756,10 @@ namespace snmalloc
         alloc_classes[sizeclass].length++;
         sl.insert(meta);
       }
+      else
+      {
+        laden.insert(meta);
+      }

       auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
       return ticker.check_tick(r);
@@ -794,7 +810,8 @@ namespace snmalloc
       }

       // Set meta slab to empty.
-      meta->initialise(sizeclass);
+      meta->initialise(
+        sizeclass, address_cast(slab), entropy.get_free_list_key());

       // Build a free list for the slab
       alloc_new_list(slab, meta, rsize, slab_size, entropy);
@@ -811,6 +828,10 @@ namespace snmalloc
         alloc_classes[sizeclass].length++;
         alloc_classes[sizeclass].available.insert(meta);
       }
+      else
+      {
+        laden.insert(meta);
+      }

       auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
       return ticker.check_tick(r);
@@ -864,6 +885,14 @@ namespace snmalloc
         dealloc_local_slabs<true>(sizeclass);
       }

+      laden.iterate([this, domesticate](
+                      BackendSlabMetadata* meta) SNMALLOC_FAST_PATH_LAMBDA {
+        if (!meta->is_large())
+        {
+          meta->free_queue.validate(entropy.get_free_list_key(), domesticate);
+        }
+      });
+
       return posted;
     }

@@ -883,7 +912,7 @@ namespace snmalloc
       c->remote_allocator = public_state();

       // Set up remote cache.
-      c->remote_dealloc_cache.init();
+      c->remote_dealloc_cache.init(entropy.get_free_list_key());
     }

     /**
@@ -892,28 +921,46 @@ namespace snmalloc
      */
     bool debug_is_empty_impl(bool* result)
     {
-      auto test = [&result](auto& queue, smallsizeclass_t size_class) {
-        queue.iterate([&result, size_class](auto slab_metadata) {
+      auto& key = entropy.get_free_list_key();
+
+      auto error = [&result, &key](auto slab_metadata) {
+        auto slab_interior = slab_metadata->get_slab_interior(key);
+        const PagemapEntry& entry =
+          Config::Backend::get_metaentry(slab_interior);
+        SNMALLOC_ASSERT(slab_metadata == entry.get_slab_metadata());
+        auto size_class = entry.get_sizeclass();
+        auto slab_size = sizeclass_full_to_slab_size(size_class);
+        auto slab_start = bits::align_down(slab_interior, slab_size);
+
+        if (result != nullptr)
+          *result = false;
+        else
+          report_fatal_error(
+            "debug_is_empty: found non-empty allocator: size={} on "
+            "slab_start {}",
+            sizeclass_full_to_size(size_class),
+            slab_start);
+      };
+
+      auto test = [&error](auto& queue) {
+        queue.iterate([&error](auto slab_metadata) {
           if (slab_metadata->needed() != 0)
           {
-            if (result != nullptr)
-              *result = false;
-            else
-              report_fatal_error(
-                "debug_is_empty: found non-empty allocator: size={} ({})",
-                sizeclass_to_size(size_class),
-                size_class);
+            error(slab_metadata);
           }
         });
       };

       bool sent_something = flush(true);

-      smallsizeclass_t size_class = 0;
       for (auto& alloc_class : alloc_classes)
       {
-        test(alloc_class.available, size_class);
-        size_class++;
+        test(alloc_class.available);
+      }
+
+      if (!laden.is_empty())
+      {
+        error(laden.peek());
       }

       // Place the static stub message on the queue.
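
The new error lambda recovers the slab's base from an interior pointer by rounding down to the slab size, which works because slabs are naturally aligned to their power-of-two size. A standalone sketch of that arithmetic, in the spirit of bits::align_down, with made-up values:

#include <cassert>
#include <cstdint>

// Round an interior address down to a power-of-two alignment.
constexpr uint64_t align_down(uint64_t value, uint64_t alignment)
{
  return value & ~(alignment - 1); // alignment must be a power of two
}

int main()
{
  uint64_t slab_size = 0x4000;        // e.g. a 16 KiB slab
  uint64_t interior = 0x7f0012345678; // some address inside the slab
  // Masking off the low 14 bits yields the slab start.
  assert(align_down(interior, slab_size) == 0x7f0012344000);
  return 0;
}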

src/snmalloc/mem/freelist.h

Lines changed: 17 additions & 17 deletions
@@ -115,7 +115,6 @@ namespace snmalloc
     class T
     {
       template<
-        bool,
         bool,
         SNMALLOC_CONCEPT(capptr::IsBound),
         SNMALLOC_CONCEPT(capptr::IsBound)>
@@ -220,7 +219,6 @@ namespace snmalloc
        return reinterpret_cast<Object::T<BQueue>*>(ptr);
      }

-    private:
      /**
       * Involutive encryption with raw pointers
       */
@@ -247,7 +245,6 @@ namespace snmalloc
        }
      }

-    public:
      /**
       * Encode next. We perform two convenient little bits of type-level
       * sleight of hand here:
@@ -506,7 +503,6 @@ namespace snmalloc
    */
   template<
     bool RANDOM,
-    bool INIT = true,
     SNMALLOC_CONCEPT(capptr::IsBound) BView = capptr::bounds::Alloc,
     SNMALLOC_CONCEPT(capptr::IsBound) BQueue = capptr::bounds::AllocWild>
   class Builder
@@ -532,7 +528,7 @@ namespace snmalloc
     // This enables branch free enqueuing.
     std::array<void**, LENGTH> end{nullptr};

-    Object::BQueuePtr<BQueue>* cast_end(uint32_t ix)
+    [[nodiscard]] Object::BQueuePtr<BQueue>* cast_end(uint32_t ix) const
     {
       return reinterpret_cast<Object::BQueuePtr<BQueue>*>(end[ix]);
     }
@@ -542,7 +538,7 @@ namespace snmalloc
       end[ix] = reinterpret_cast<void**>(p);
     }

-    Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix)
+    [[nodiscard]] Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix) const
     {
       return Object::BHeadPtr<BView, BQueue>::unsafe_from(
         static_cast<Object::T<BQueue>*>(head[ix]));
@@ -551,13 +547,7 @@ namespace snmalloc
     std::array<uint16_t, RANDOM ? 2 : 0> length{};

   public:
-    constexpr Builder()
-    {
-      if (INIT)
-      {
-        init();
-      }
-    }
+    constexpr Builder() = default;

     /**
      * Checks if the builder contains any elements.
@@ -629,8 +619,8 @@ namespace snmalloc
      * and is thus subject to encoding if the next_object pointers
      * encoded.
      */
-    Object::BHeadPtr<BView, BQueue>
-    read_head(uint32_t index, const FreeListKey& key)
+    [[nodiscard]] Object::BHeadPtr<BView, BQueue>
+    read_head(uint32_t index, const FreeListKey& key) const
     {
       return Object::decode_next(
         address_cast(&head[index]), cast_head(index), key);
@@ -688,7 +678,7 @@ namespace snmalloc
     /**
      * Set the builder to a not building state.
      */
-    constexpr void init()
+    constexpr void init(address_t slab, const FreeListKey& key)
     {
       for (size_t i = 0; i < LENGTH; i++)
       {
@@ -697,6 +687,16 @@ namespace snmalloc
         {
           length[i] = 0;
         }
+
+        // Head is not live when a builder is initialised.
+        // We use this slot to store a pointer into the slab for the
+        // allocations. This then establishes the invariant that head is
+        // always (a possibly encoded) pointer into the slab, and thus
+        // the Freelist builder always knows which block it is referring to.
+        head[i] = Object::code_next(
+          address_cast(&head[i]),
+          useless_ptr_from_addr<Object::T<BQueue>>(slab),
+          key);
       }
     }

@@ -718,7 +718,7 @@ namespace snmalloc
       // empty, but you are not allowed to call this in the empty case.
       auto last = Object::BHeadPtr<BView, BQueue>::unsafe_from(
         Object::from_next_ptr(cast_end(0)));
-      init();
+      init(address_cast(head[0]), key);
       return {first, last};
     }
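
The comment added to init states the new invariant: head is only live while the list is non-empty, so init can park a (possibly encoded) pointer into the slab there, which is what get_slab_interior in metadata.h later reads back. The encoding is involutive, as the class's own comment says; a toy version makes that concrete. snmalloc's real code_next differs in detail, so this is only a sketch:

#include <cstdint>

// Toy involutive pointer encoding: XOR-ing with the slot's own address
// and a secret key is its own inverse, and a value copied into a
// different slot no longer decodes to a usable pointer.
uint64_t code_next(uint64_t slot_addr, uint64_t next, uint64_t key)
{
  return next ^ slot_addr ^ key;
}

int main()
{
  uint64_t key = 0x9e3779b97f4a7c15;
  uint64_t slot = 0x1000;
  uint64_t next = 0x2000;

  uint64_t encoded = code_next(slot, next, key);
  // Applying the same mix again recovers the original pointer.
  return code_next(slot, encoded, key) == next ? 0 : 1;
}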

src/snmalloc/mem/localalloc.h

Lines changed: 5 additions & 1 deletion
@@ -199,7 +199,11 @@ namespace snmalloc

       // Initialise meta data for a successful large allocation.
       if (meta != nullptr)
-        meta->initialise_large();
+      {
+        meta->initialise_large(
+          address_cast(chunk), local_cache.entropy.get_free_list_key());
+        core_alloc->laden.insert(meta);
+      }

       if (zero_mem == YesZero && chunk.unsafe_ptr() != nullptr)
       {

src/snmalloc/mem/metadata.h

Lines changed: 12 additions & 4 deletions
@@ -440,12 +440,13 @@ namespace snmalloc
     /**
      * Initialise FrontendSlabMetadata for a slab.
      */
-    void initialise(smallsizeclass_t sizeclass)
+    void initialise(
+      smallsizeclass_t sizeclass, address_t slab, const FreeListKey& key)
     {
       static_assert(
         std::is_base_of<FrontendSlabMetadata_Trait, BackendType>::value,
         "Template should be a subclass of FrontendSlabMetadata");
-      free_queue.init();
+      free_queue.init(slab, key);
       // Set up meta data as if the entire slab has been turned into a free
       // list. This means we don't have to check for special cases where we have
       // returned all the elements, but this is a slab that is still being bump
@@ -461,10 +462,10 @@ namespace snmalloc
      *
      * Set needed so immediately moves to slow path.
      */
-    void initialise_large()
+    void initialise_large(address_t slab, const FreeListKey& key)
     {
       // We will push to this just to make the fast path clean.
-      free_queue.init();
+      free_queue.init(slab, key);

       // Flag to detect that it is a large alloc on the slow path
       large_ = true;
@@ -579,6 +580,13 @@ namespace snmalloc

       return {p, !sleeping};
     }
+
+    // Returns a pointer to somewhere in the slab. May not be the
+    // start of the slab.
+    [[nodiscard]] address_t get_slab_interior(const FreeListKey& key) const
+    {
+      return address_cast(free_queue.read_head(0, key));
+    }
   };

   /**

src/snmalloc/mem/remotecache.h

Lines changed: 5 additions & 3 deletions
@@ -17,7 +17,7 @@ namespace snmalloc
    */
   struct RemoteDeallocCache
   {
-    std::array<freelist::Builder<false, false>, REMOTE_SLOTS> list;
+    std::array<freelist::Builder<false>, REMOTE_SLOTS> list;

     /**
      * The total amount of memory we are waiting for before we will dispatch
@@ -165,14 +165,16 @@ namespace snmalloc
      * Must be called before anything else to ensure actually initialised
      * not just zero init.
      */
-    void init()
+    void init(const FreeListKey& key)
     {
 #ifndef NDEBUG
       initialised = true;
 #endif
       for (auto& l : list)
       {
-        l.init();
+        // We do not need to initialise with a particular slab, so pass
+        // a null address.
+        l.init(0, key);
       }
       capacity = REMOTE_CACHE;
     }
