add initial guard slabs implementation

parent cc1e79fdba
commit 1be74ec40d
@@ -83,8 +83,8 @@ allocation and then unmapped on free.
 * High entropy per-slab random values
 * [in-progress] Mangled into a unique value per slab slot (although not
   with a strong keyed hash due to performance limitations)
-* [in-progress] Some slab locations are skipped and remain memory protected,
-  leaving slab size class regions interspersed with guard pages
+* Possible slab locations are skipped and remain memory protected, leaving slab
+  size class regions interspersed with guard pages
 * Zero size allocations are memory protected
 * Protected allocator metadata
 * Address space for metadata is never used for allocations and vice versa
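
As background for the malloc.c changes below: per the README bullet above, skipped slab
locations simply remain memory protected, so a skipped slot acts as a guard region sitting
between usable slabs inside a size class region. A minimal standalone sketch of what that
layout looks like, assuming slabs are laid out contiguously at region_start + index * slab_size
(the address and slab size below are made-up illustrative values, not taken from this commit):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: slab i of a size class lives at region_start + i * slab_size,
 * and the whole region begins life memory protected. Any slot that is never
 * handed out therefore remains a protected guard region. */
static uintptr_t slab_address(uintptr_t region_start, size_t slab_size, size_t index) {
    return region_start + index * slab_size;
}

int main(void) {
    const uintptr_t region_start = 0x500000000000; /* illustrative only */
    const size_t slab_size = 128 * 1024;           /* illustrative only */

    /* With guard slabs enabled, only every other index is ever used, so the
     * odd slots stay protected and sit between the usable slab locations. */
    for (size_t i = 0; i < 8; i++) {
        printf("slot %zu at 0x%zx: %s\n", i,
               (size_t)slab_address(region_start, slab_size, i),
               i % 2 == 0 ? "usable slab" : "guard (stays protected)");
    }
    return 0;
}
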
 malloc.c | 9 +++++++--

@@ -19,6 +19,8 @@
 
 static_assert(sizeof(void *) == 8, "64-bit only");
 
+static const bool guard_slabs = true;
+
 // either sizeof(uint64_t) or 0
 static const size_t canary_size = sizeof(uint64_t);
 
@@ -140,9 +142,9 @@ static size_t get_metadata_max(size_t slab_size) {
 }
 
 static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_size, bool non_zero_size) {
-    if (unlikely(c->metadata_count == c->metadata_allocated)) {
+    if (unlikely(c->metadata_count >= c->metadata_allocated)) {
         size_t metadata_max = get_metadata_max(slab_size);
-        if (c->metadata_count == metadata_max) {
+        if (c->metadata_count >= metadata_max) {
             errno = ENOMEM;
             return NULL;
         }
@@ -162,6 +164,9 @@ static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_si
         return NULL;
     }
     c->metadata_count++;
+    if (guard_slabs) {
+        c->metadata_count++;
+    }
     return metadata;
 }
 
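
Taken together, the alloc_metadata() changes make the slot counter advance by two whenever
guard_slabs is enabled, so every other metadata slot, and with it the corresponding slab
location, is skipped. The == checks become >= because the counter can now step over an exact
limit instead of landing on it. A simplified standalone sketch of just that counter logic
follows; reserve_slot() is a hypothetical stand-in, and unlike the real function, which
presumably grows the metadata region when metadata_allocated is reached, this sketch simply
fails on both limits:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const bool guard_slabs = true;

/* Only the two counters involved in the diff; the real struct has more fields. */
struct size_class {
    size_t metadata_count;
    size_t metadata_allocated;
};

/* Hypothetical stand-in for the reservation step of alloc_metadata: returns
 * the reserved slot index, or (size_t)-1 on failure. */
static size_t reserve_slot(struct size_class *c, size_t metadata_max) {
    /* ">=" rather than "==": with guard slabs the counter moves in steps of
     * two, so it can jump past an exact limit rather than hit it. */
    if (c->metadata_count >= c->metadata_allocated ||
        c->metadata_count >= metadata_max) {
        errno = ENOMEM;
        return (size_t)-1;
    }
    size_t slot = c->metadata_count;
    c->metadata_count++;
    if (guard_slabs) {
        /* skip the next slot; its slab location is never used and stays protected */
        c->metadata_count++;
    }
    return slot;
}

int main(void) {
    struct size_class c = { .metadata_count = 0, .metadata_allocated = 5 };
    for (;;) {
        size_t slot = reserve_slot(&c, 5);
        if (slot == (size_t)-1) {
            puts("no more slots");
            break;
        }
        printf("reserved slot %zu (slot %zu left as a guard)\n", slot, slot + 1);
    }
    return 0;
}

With metadata_allocated set to 5, the counter ends the run at 6, a value an == comparison
would never catch; that overshoot is exactly the case the two relaxed checks account for.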