only use reserved memory for regions hash table

pull/50/head
Daniel Micay 2018-09-01 10:20:23 -04:00
parent c3a4829d77
commit e93d039214
2 changed files with 23 additions and 7 deletions
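
The change reserves both copies of the regions hash table as inaccessible PROT_NONE memory at initialization and only flips pages to read-write as the table grows, so the table never occupies address space that was ever usable for allocations. A minimal standalone sketch of that reserve-then-commit pattern, using raw mmap/mprotect where the allocator uses its allocate_pages and memory_protect_rw wrappers:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void) {
        size_t reserve_size = 1UL << 20; /* reserve 1 MiB of address space up front */

        /* PROT_NONE reservation: claims address space without committing
           usable pages; any stray access faults immediately */
        void *table = mmap(NULL, reserve_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED) {
            return 1;
        }

        /* commit only the prefix that is currently needed */
        size_t used_size = 16 * 4096;
        if (mprotect(table, used_size, PROT_READ | PROT_WRITE)) {
            return 1;
        }

        ((char *)table)[0] = 1; /* the committed prefix is now usable */
        printf("reserved %zu bytes, committed %zu\n", reserve_size, used_size);
        return 0;
    }

Growth then never maps new address space; it only changes protection inside the existing reservation.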

@@ -72,7 +72,9 @@ allocation and then unmapped on free.
 * [in-progress] Some slab locations are skipped and remain memory protected,
   leaving slab size class regions interspersed with guard pages
 * Zero size allocations are memory protected
-* [mostly in-progress] Protected allocator metadata
+* Protected allocator metadata
+    * Address space for metadata is never used for allocations and vice versa
+    * [implementing stronger protection is in-progress]
 * Extension for retrieving the size of allocations with fallback
   to a sentinel for pointers not managed by the allocator
     * Can also return accurate values for pointers *within* small allocations
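
The new metadata bullet is a consequence of fixed, disjoint reservations: with all allocator metadata confined to regions reserved at initialization, metadata membership is a plain range check and user allocations can never alias it. A minimal illustration of the idea; metadata_reserve and is_metadata are hypothetical names, not hardened_malloc's own:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /* hypothetical: one fixed reservation holds all allocator metadata */
    static char *metadata_start;
    static size_t metadata_size;

    static int metadata_reserve(size_t size) {
        /* reserved once and never handed out for allocations, so the
           metadata and allocation address ranges stay disjoint forever */
        void *p = mmap(NULL, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }
        metadata_start = p;
        metadata_size = size;
        return 0;
    }

    /* "does this pointer land in metadata?" reduces to a range check */
    static bool is_metadata(const void *p) {
        return (const char *)p >= metadata_start &&
            (const char *)p < metadata_start + metadata_size;
    }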

@@ -104,6 +104,7 @@ static union {
     struct {
         void *slab_region_start;
         void *slab_region_end;
+        struct region_info *regions[2];
         atomic_bool initialized;
     };
     char padding[PAGE_SIZE];
@@ -502,6 +503,7 @@ struct region_info {
 };

 static const size_t initial_region_table_size = 256;
+static const size_t max_region_table_size = class_region_size / PAGE_SIZE;
 static struct random_state regions_rng;
 static struct region_info *regions;
@@ -527,8 +529,14 @@ static int regions_grow(void) {
     size_t newsize = newtotal * sizeof(struct region_info);
     size_t mask = newtotal - 1;

-    struct region_info *p = allocate_pages(newsize, PAGE_SIZE, true);
-    if (p == NULL) {
+    if (newtotal > max_region_table_size) {
+        return 1;
+    }
+
+    struct region_info *p = regions == ro.regions[0] ?
+        ro.regions[1] : ro.regions[0];
+
+    if (memory_protect_rw(p, newsize)) {
         return 1;
     }

@@ -543,7 +551,7 @@ static int regions_grow(void) {
         }
     }

-    deallocate_pages(regions, regions_total * sizeof(struct region_info), PAGE_SIZE);
+    memory_map_fixed(regions, regions_total * sizeof(struct region_info));
     regions_free = regions_free + regions_total;
     regions_total = newtotal;
     regions = p;
@@ -656,9 +664,15 @@ COLD static void init_slow_path(void) {
     struct random_state rng;
     random_state_init(&rng);

-    regions = allocate_pages(regions_total * sizeof(struct region_info), PAGE_SIZE, true);
-    if (regions == NULL) {
-        fatal_error("failed to set up allocator");
+    for (unsigned i = 0; i < 2; i++) {
+        ro.regions[i] = allocate_pages(max_region_table_size, PAGE_SIZE, false);
+        if (ro.regions[i] == NULL) {
+            fatal_error("failed to reserve memory for regions table");
+        }
+    }
+    regions = ro.regions[0];
+    if (memory_protect_rw(regions, regions_total * sizeof(struct region_info))) {
+        fatal_error("failed to unprotect memory for regions table");
     }

     random_state_init(&regions_rng);
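
In regions_grow, the two reservations act as a double buffer: the table is rehashed into the read-write prefix of the spare reservation, and the old table is retired by mapping fresh PROT_NONE pages over it with MAP_FIXED, which discards the stale contents and revokes access in one step (presumably what the memory_map_fixed helper wraps). A hypothetical standalone sketch of that flip; struct table and table_flip are invented names, and a memcpy stands in for the real rehash:

    #include <string.h>
    #include <sys/mman.h>

    /* hypothetical double-buffered table: two equal PROT_NONE
       reservations, only the active one has a read-write prefix */
    struct table {
        void *bufs[2];   /* each reserved with mmap(..., PROT_NONE, ...) */
        size_t reserved; /* size of each reservation */
        void *active;    /* buffer currently holding the table */
        size_t used;     /* committed (rw) bytes in the active buffer */
    };

    /* grow into the spare reservation, then retire the old one */
    static int table_flip(struct table *t, size_t new_used) {
        if (new_used > t->reserved) {
            return 1; /* the table can never outgrow its reservation */
        }

        void *next = t->active == t->bufs[0] ? t->bufs[1] : t->bufs[0];

        /* commit the larger prefix of the spare reservation */
        if (mprotect(next, new_used, PROT_READ | PROT_WRITE)) {
            return 1;
        }

        /* a real hash table would rehash entries here */
        memcpy(next, t->active, t->used);

        /* MAP_FIXED over the old rw prefix: one call replaces the pages,
           discarding their contents and making them inaccessible again */
        if (mmap(t->active, t->used, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
            return 1;
        }

        t->active = next;
        t->used = new_used;
        return 0;
    }

Since both buffers are sized for the maximum table from the start, growth can only fail by hitting the fixed cap, which is exactly the new max_region_table_size check above.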