add quarantine for large allocations

pull/50/head
Daniel Micay 2018-10-08 15:50:31 -04:00
parent cc9699f1b4
commit 1a10c17e8b
3 changed files with 38 additions and 5 deletions


@@ -75,6 +75,8 @@ features with a significant performance or memory usage cost.
 #define SLAB_CANARY true
 #define GUARD_SLABS_INTERVAL 1
 #define GUARD_SIZE_DIVISOR 2
+#define REGION_QUARANTINE_SIZE 1024
+#define REGION_QUARANTINE_SKIP_THRESHOLD (32 * 1024 * 1024)
 ```
 
 There will be more control over enabled features in the future along with
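
Since regions at or above the skip threshold bypass the quarantine entirely, these defaults bound the quarantine at 1024 regions of just under 32 MiB each, i.e. under 32 GiB of address space in the worst case; that space is only reserved and inaccessible, not committed memory.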
@@ -127,7 +129,12 @@ allocation and then unmapped on free.
 * [in-progress] Randomized delayed free for slab allocations
 * [in-progress] Randomized allocation of slabs
 * [more randomization coming as the implementation is matured]
-* Slab allocations are zeroed on free and large allocations are unmapped
+* Slab allocations are zeroed on free
+* Large allocations are purged and memory protected on free with the memory
+  mapping kept reserved in a quarantine to detect use-after-free
+    * The quarantine is a FIFO ring buffer, with the oldest mapping in the
+      quarantine being unmapped to make room for the most recently freed
+      mapping
 * Detection of write-after-free by verifying zero filling is intact
 * Memory in fresh allocations is consistently zeroed due to it either being
   fresh pages or zeroed on free after previous usage
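
To make the new bullet points concrete, here is a minimal, self-contained sketch of a FIFO ring-buffer quarantine; `QUARANTINE_SLOTS`, `quarantine_push` and the direct `munmap` call are illustrative stand-ins rather than the project's API, and the real implementation is the `regions_quarantine_deallocate_pages` function added in the third file below.

```
#include <stddef.h>
#include <sys/mman.h>

#define QUARANTINE_SLOTS 1024

struct quarantined {
    void *p;
    size_t size;
};

static struct quarantined ring[QUARANTINE_SLOTS];
static size_t ring_index;

// Freed regions enter the ring instead of being returned to the kernel right
// away; only the oldest entry, evicted to make room, is actually unmapped.
static void quarantine_push(void *p, size_t size) {
    struct quarantined old = ring[ring_index];
    if (old.p != NULL) {
        munmap(old.p, old.size);
    }
    ring[ring_index] = (struct quarantined){p, size};
    ring_index = (ring_index + 1) % QUARANTINE_SLOTS;
}
```

A dangling pointer into a quarantined region keeps pointing at reserved, inaccessible memory until at least `QUARANTINE_SLOTS` further large frees occur, which is the window in which a use-after-free faults deterministically.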


@@ -9,5 +9,7 @@
 #define SLAB_CANARY true
 #define GUARD_SLABS_INTERVAL 1
 #define GUARD_SIZE_DIVISOR 2
+#define REGION_QUARANTINE_SIZE 1024
+#define REGION_QUARANTINE_SKIP_THRESHOLD (32 * 1024 * 1024)
 
 #endif


@@ -522,6 +522,27 @@ static struct region_info *regions;
 static size_t regions_total = initial_region_table_size;
 static size_t regions_free = initial_region_table_size;
 static struct mutex regions_lock = MUTEX_INITIALIZER;
+static struct region_info regions_quarantine[REGION_QUARANTINE_SIZE];
+static size_t regions_quarantine_index;
+
+static void regions_quarantine_deallocate_pages(void *p, size_t size, size_t guard_size) {
+    if (size >= REGION_QUARANTINE_SKIP_THRESHOLD) {
+        deallocate_pages(p, size, guard_size);
+        return;
+    }
+
+    if (unlikely(memory_map_fixed(p, size))) {
+        deallocate_pages(p, size, guard_size);
+        return;
+    }
+
+    struct region_info old = regions_quarantine[regions_quarantine_index];
+    if (old.p != NULL) {
+        deallocate_pages(old.p, old.size, old.guard_size);
+    }
+    regions_quarantine[regions_quarantine_index] = (struct region_info){p, size, guard_size};
+    regions_quarantine_index = (regions_quarantine_index + 1) % REGION_QUARANTINE_SIZE;
+}
 
 static size_t hash_page(void *p) {
     uintptr_t u = (uintptr_t)p >> PAGE_SHIFT;
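
The quarantine path above hinges on `memory_map_fixed` replacing the freed range in place. A plausible sketch of such a helper, assuming it works by mapping fresh `PROT_NONE` pages over the region (consistent with the README's "purged and memory protected on free") and returns nonzero on failure:

```
#include <sys/mman.h>

// Sketch: replace [p, p + size) with a fresh inaccessible mapping. MAP_FIXED
// atomically discards the old pages (purging their contents) and PROT_NONE
// makes any later access through a dangling pointer fault immediately.
static int memory_map_fixed(void *p, size_t size) {
    void *ret = mmap(p, size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
    return ret == MAP_FAILED;
}
```

If the remap fails, `regions_quarantine_deallocate_pages` falls back to unmapping the region immediately rather than quarantining a still-accessible mapping.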
@@ -792,7 +813,7 @@ static void deallocate_large(void *p, size_t *expected_size) {
     regions_delete(region);
     mutex_unlock(&regions_lock);
 
-    deallocate_pages(p, size, guard_size);
+    regions_quarantine_deallocate_pages(p, size, guard_size);
 }
 
 static size_t adjust_size_for_canaries(size_t size) {
@@ -829,7 +850,10 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     return p;
 }
 
-static const size_t mremap_threshold = 4 * 1024 * 1024;
+#define MREMAP_THRESHOLD (32 * 1024 * 1024)
+
+static_assert(MREMAP_THRESHOLD >= REGION_QUARANTINE_SKIP_THRESHOLD,
+              "mremap threshold must be above region quarantine limit");
 
 EXPORT void *h_realloc(void *old, size_t size) {
     if (old == NULL) {
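
The assertion presumably encodes an ordering dependency between the two thresholds: the mremap fast path further down moves the old mapping wholesale, leaving nothing at the old address to quarantine, so it must only trigger for regions at or above `REGION_QUARANTINE_SKIP_THRESHOLD`, which would have bypassed the quarantine anyway.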
@@ -874,7 +898,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
            return NULL;
        }
        void *new_guard_end = (char *)new_end + old_guard_size;
-       memory_unmap(new_guard_end, old_rounded_size - rounded_size);
+       regions_quarantine_deallocate_pages(new_guard_end, old_rounded_size - rounded_size, 0);
 
        mutex_lock(&regions_lock);
        struct region_info *region = regions_find(old);
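
Shrinking in place now also routes the excess through the quarantine: the tail beyond the relocated guard page is kept reserved rather than unmapped, apparently with a guard size of 0 because that tail carries no guard pages of its own to release later.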
@@ -907,7 +931,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
    }
 
    size_t copy_size = size < old_size ? size : old_size;
-   if (copy_size >= mremap_threshold) {
+   if (copy_size >= MREMAP_THRESHOLD) {
        void *new = allocate(size);
        if (new == NULL) {
            return NULL;