From 1be74ec40d9ccdff4b1e2eb5e97604557e308cbb Mon Sep 17 00:00:00 2001
From: Daniel Micay
Date: Thu, 6 Sep 2018 18:53:06 -0400
Subject: [PATCH] add initial guard slabs implementation

---
 README.md | 4 ++--
 malloc.c  | 9 +++++++--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index e2f8693..6482496 100644
--- a/README.md
+++ b/README.md
@@ -83,8 +83,8 @@ allocation and then unmapped on free.
 * High entropy per-slab random values
 * [in-progress] Mangled into a unique value per slab slot (although not
   with a strong keyed hash due to performance limitations)
-* [in-progress] Some slab locations are skipped and remain memory protected,
-  leaving slab size class regions interspersed with guard pages
+* Possible slab locations are skipped and remain memory protected, leaving slab
+  size class regions interspersed with guard pages
 * Zero size allocations are memory protected
 * Protected allocator metadata
 * Address space for metadata is never used for allocations and vice versa

diff --git a/malloc.c b/malloc.c
index d184f1d..8492fa4 100644
--- a/malloc.c
+++ b/malloc.c
@@ -19,6 +19,8 @@
 
 static_assert(sizeof(void *) == 8, "64-bit only");
 
+static const bool guard_slabs = true;
+
 // either sizeof(uint64_t) or 0
 static const size_t canary_size = sizeof(uint64_t);
 
@@ -140,9 +142,9 @@ static size_t get_metadata_max(size_t slab_size) {
 }
 
 static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_size, bool non_zero_size) {
-    if (unlikely(c->metadata_count == c->metadata_allocated)) {
+    if (unlikely(c->metadata_count >= c->metadata_allocated)) {
         size_t metadata_max = get_metadata_max(slab_size);
-        if (c->metadata_count == metadata_max) {
+        if (c->metadata_count >= metadata_max) {
             errno = ENOMEM;
             return NULL;
         }
@@ -162,6 +164,9 @@ static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_si
         return NULL;
     }
     c->metadata_count++;
+    if (guard_slabs) {
+        c->metadata_count++;
+    }
     return metadata;
 }
 
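
Not part of the patch itself, but for readers skimming the diff: the extra
metadata_count increment is what implements the guard slabs. Each allocated
slab consumes two metadata slots, so every other slab-sized region in the
size class is never made read-write and sits between live slabs as a guard.
The standalone C sketch below only illustrates that slot-skipping arithmetic;
the slab_size value, the printing, and main() are hypothetical stand-ins for
the allocator's real mmap/mprotect-backed regions.

/*
 * Toy illustration (not from the patch) of the guard slab idea: when
 * guard_slabs is enabled, advancing metadata_count by two per slab leaves
 * every other slab slot unused and still memory protected, acting as a
 * guard region between live slabs. Only the increment logic mirrors the
 * diff; all other names and sizes here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const bool guard_slabs = true;

int main(void) {
    const size_t slab_size = 16384;   /* hypothetical size class slab size */
    size_t metadata_count = 0;        /* next free slab slot index */

    /* Allocate 4 slabs the way alloc_metadata advances metadata_count. */
    for (int i = 0; i < 4; i++) {
        size_t slot = metadata_count; /* this slot becomes a live slab */
        printf("slab %d at offset 0x%zx\n", i, slot * slab_size);
        metadata_count++;             /* slot consumed by the new slab */
        if (guard_slabs) {
            metadata_count++;         /* skipped slot stays protected: guard */
        }
    }
    return 0;
}

Running the sketch shows live slabs landing 2 * slab_size apart, i.e. each
pair of neighbours is separated by one untouched, still-protected slab slot.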