add initial slab allocation quarantine

pull/65/head
Daniel Micay 2018-11-05 16:19:50 -05:00
parent fea335282a
commit 3a488c9a27
3 changed files with 77 additions and 3 deletions

View File

@@ -107,6 +107,8 @@ and will be migrated to the main configuration when proper sanity checks and
documentation are written. The following advanced options are available:
```
#define SLAB_QUARANTINE_RANDOM_SIZE 0
#define SLAB_QUARANTINE_QUEUE_SIZE 0
#define GUARD_SLABS_INTERVAL 1
#define GUARD_SIZE_DIVISOR 2
#define REGION_QUARANTINE_RANDOM_SIZE 128
@@ -189,7 +191,7 @@ was a bit less important and if a core goal was finding latent bugs.
* Fine-grained randomization within memory regions
* Randomly sized guard regions for large allocations
* Random slot selection within slabs
* [in-progress] Randomized delayed free for slab allocations
* Randomized delayed free for slab allocations
* [in-progress] Randomized allocation of slabs
* [more randomization coming as the implementation matures]
* Slab allocations are zeroed on free
@@ -204,8 +206,7 @@ was a bit less important and if a core goal was finding latent bugs.
* Detection of write-after-free by verifying zero filling is intact
* Memory in fresh allocations is consistently zeroed due to it either being
fresh pages or zeroed on free after previous usage
* [in-progress] Delayed free via a combination of FIFO and randomization for
slab allocations
* Delayed free via a combination of FIFO and randomization for slab allocations
* Random canaries placed after each slab allocation to *absorb*
and then later detect overflows/underflows
* High entropy per-slab random values
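The randomized delayed free listed above layers a random substitution pool in front of a FIFO queue. The following is a minimal standalone sketch of how the two layers can compose, not the project's implementation: the names, the sizes, and the use of `rand()` are illustrative assumptions only.

```
#include <stddef.h>
#include <stdlib.h>

#define POOL_SIZE 16   /* illustrative size of the random substitution pool */
#define QUEUE_SIZE 32  /* illustrative size of the FIFO delay queue */

static void *pool[POOL_SIZE];
static void *queue[QUEUE_SIZE];
static size_t queue_index;

/* Returns the pointer that may now actually be released, or NULL while the
   free is still being deferred. */
static void *quarantine_swap(void *p) {
    /* layer 1: p displaces a randomly chosen pool entry */
    size_t i = (size_t)rand() % POOL_SIZE; /* stand-in for a proper CSPRNG */
    void *displaced = pool[i];
    pool[i] = p;
    if (displaced == NULL) {
        return NULL; /* pool slot was empty: nothing to release yet */
    }
    /* layer 2: the displaced pointer waits in a FIFO ring buffer */
    void *oldest = queue[queue_index];
    queue[queue_index] = displaced;
    queue_index = (queue_index + 1) % QUEUE_SIZE;
    return oldest; /* NULL until the queue has filled once */
}
```

The random layer makes the reuse order unpredictable, while the queue adds a minimum delay before any quarantined slot can be handed out again.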

View File

@@ -3,6 +3,8 @@
#include <stdbool.h>
#define SLAB_QUARANTINE_RANDOM_SIZE 0
#define SLAB_QUARANTINE_QUEUE_SIZE 0
#define GUARD_SLABS_INTERVAL 1
#define GUARD_SIZE_DIVISOR 2
#define REGION_QUARANTINE_RANDOM_SIZE 128
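// Illustrative usage only (the values below are arbitrary assumptions, not
// project recommendations): setting either size to a non-zero value compiles
// the corresponding quarantine layer in, e.g.
//   #define SLAB_QUARANTINE_RANDOM_SIZE 32
//   #define SLAB_QUARANTINE_QUEUE_SIZE 1024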

View File

@@ -28,6 +28,8 @@ extern int __register_atfork(void (*)(void), void (*)(void), void (*)(void), void *);
#define atfork pthread_atfork
#endif
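// quarantine code is compiled in when either the random pool or the FIFO queue has a non-zero size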
#define SLAB_QUARANTINE (SLAB_QUARANTINE_RANDOM_SIZE > 0 || SLAB_QUARANTINE_QUEUE_SIZE > 0)
static_assert(sizeof(void *) == 8, "64-bit only");
static_assert(!WRITE_AFTER_FREE_CHECK || ZERO_ON_FREE, "WRITE_AFTER_FREE_CHECK depends on ZERO_ON_FREE");
@@ -65,6 +67,9 @@ struct slab_metadata {
#ifdef SLAB_METADATA_COUNT
u16 count;
#endif
#if SLAB_QUARANTINE
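// per-slot quarantine bitmap (4 x 64 bits): a set bit marks a slot currently held in quarantine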
u64 quarantine[4];
#endif
};
static const size_t min_align = 16;
@@ -172,6 +177,15 @@ struct __attribute__((aligned(CACHELINE_SIZE))) size_class {
struct libdivide_u32_t size_divisor;
struct libdivide_u64_t slab_size_divisor;
#if SLAB_QUARANTINE_RANDOM_SIZE > 0
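// random substitution pool: a newly freed pointer displaces a randomly chosen entry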
void *quarantine_random[SLAB_QUARANTINE_RANDOM_SIZE];
#endif
#if SLAB_QUARANTINE_QUEUE_SIZE > 0
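// FIFO ring buffer delaying reuse by a further SLAB_QUARANTINE_QUEUE_SIZE frees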
void *quarantine_queue[SLAB_QUARANTINE_QUEUE_SIZE];
size_t quarantine_queue_index;
#endif
// slabs with at least one allocated slot and at least one free slot
//
// LIFO doubly-linked list
@@ -264,6 +278,23 @@ static bool get_slot(struct slab_metadata *metadata, size_t index) {
return (metadata->bitmap[bucket] >> (index - bucket * 64)) & 1UL;
}
#if SLAB_QUARANTINE
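// quarantine bitmap accessors, mirroring the slot bitmap helpers above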
static void set_quarantine(struct slab_metadata *metadata, size_t index) {
size_t bucket = index / 64;
metadata->quarantine[bucket] |= 1UL << (index - bucket * 64);
}
static void clear_quarantine(struct slab_metadata *metadata, size_t index) {
size_t bucket = index / 64;
metadata->quarantine[bucket] &= ~(1UL << (index - bucket * 64));
}
static bool get_quarantine(struct slab_metadata *metadata, size_t index) {
size_t bucket = index / 64;
return (metadata->quarantine[bucket] >> (index - bucket * 64)) & 1UL;
}
#endif
static u64 get_mask(size_t slots) {
return slots < 64 ? ~0UL << slots : 0;
}
@@ -548,6 +579,46 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
}
}
#if SLAB_QUARANTINE
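// a slot that is still in quarantine cannot legitimately be freed again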
if (get_quarantine(metadata, slot)) {
fatal_error("double free (quarantine)");
}
set_quarantine(metadata, slot);
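// layer 1 (random): stash p in a randomly chosen pool entry and carry on with
// whatever pointer it displaces; an empty entry means the free is fully deferred for now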
#if SLAB_QUARANTINE_RANDOM_SIZE > 0
size_t random_index = get_random_u16_uniform(&c->rng, SLAB_QUARANTINE_RANDOM_SIZE);
void *substitute = c->quarantine_random[random_index];
c->quarantine_random[random_index] = p;
if (substitute == NULL) {
mutex_unlock(&c->lock);
return;
}
p = substitute;
#endif
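// layer 2 (FIFO): enqueue the pointer and take out the oldest entry;
// nothing is released until the queue has filled once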
#if SLAB_QUARANTINE_QUEUE_SIZE > 0
void *queue_substitute = c->quarantine_queue[c->quarantine_queue_index];
c->quarantine_queue[c->quarantine_queue_index] = p;
c->quarantine_queue_index = (c->quarantine_queue_index + 1) % SLAB_QUARANTINE_QUEUE_SIZE;
if (queue_substitute == NULL) {
mutex_unlock(&c->lock);
return;
}
p = queue_substitute;
#endif
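// the pointer coming out of quarantine may belong to a different slab, so re-derive
// its metadata and slot before clearing its quarantine bit and actually freeing it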
metadata = get_metadata(c, p);
slab = get_slab(c, slab_size, metadata);
slot = libdivide_u32_do((char *)p - (char *)slab, &c->size_divisor);
clear_quarantine(metadata, slot);
#endif
if (!has_free_slots(slots, metadata)) {
metadata->next = c->partial_slabs;
metadata->prev = NULL;