zero leading byte of canaries

pull/50/head
Daniel Micay 2018-10-03 17:09:57 -04:00
parent 1fbf0e27f5
commit b24569b6ca
2 changed files with 8 additions and 1 deletion

@@ -84,6 +84,7 @@ allocation and then unmapped on free.
 * Random canaries placed after each slab allocation to *absorb*
   and then later detect overflows/underflows
     * High entropy per-slab random values
+    * Leading byte is zeroed to contain C string overflows
     * [in-progress] Mangled into a unique value per slab slot (although not
       with a strong keyed hash due to performance limitations)
 * Possible slab locations are skipped and remain memory protected, leaving slab
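To make the new README bullet concrete: a minimal standalone sketch (the buffer layout and values are illustrative, not the allocator's actual slot layout) of why a zeroed leading canary byte contains C string overflows. An off-by-one NUL terminator lands on a byte that is already zero, so the canary survives intact and string reads stop at the slot boundary.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void) {
    uint64_t canary = UINT64_C(0xdeadbeefcafef00d);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    canary &= UINT64_C(0xffffffffffffff00); // lowest address holds the LSB
#else
    canary &= UINT64_C(0x00ffffffffffffff); // lowest address holds the MSB
#endif

    char slot[16];                // simulated slab slot: 8 usable bytes
    memcpy(slot + 8, &canary, 8); // canary placed after the allocation

    // 8 characters plus the terminating NUL: the NUL overflows into slot[8],
    // the leading byte of the canary, which the mask already set to zero.
    strcpy(slot, "12345678");

    uint64_t stored;
    memcpy(&stored, slot + 8, 8);
    assert(stored == canary);     // canary unchanged: the overflow was absorbed
    assert(strlen(slot) == 8);    // string reads stop at the zero byte
    puts("canary intact");
}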

@@ -274,6 +274,12 @@ static void write_after_free_check(char *p, size_t size) {
     }
 }
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+static const uint64_t canary_mask = 0xffffffffffffff00UL;
+#else
+static const uint64_t canary_mask = 0x00ffffffffffffffUL;
+#endif
+
 static void set_canary(struct slab_metadata *metadata, void *p, size_t size) {
     memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
 }
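The two masks differ by endianness because set_canary memcpy's the value into place: the byte that lands at the lowest address, directly after the allocation, is the least significant byte on little-endian and the most significant byte on big-endian. A minimal sketch, assuming GCC/Clang's __BYTE_ORDER__ macros, checking that the leading in-memory byte is zero either way:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static const uint64_t canary_mask =
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    UINT64_C(0xffffffffffffff00); // LSB occupies the lowest address
#else
    UINT64_C(0x00ffffffffffffff); // MSB occupies the lowest address
#endif

int main(void) {
    uint64_t canary_value = UINT64_C(0x0123456789abcdef) & canary_mask;
    unsigned char bytes[sizeof canary_value];
    memcpy(bytes, &canary_value, sizeof canary_value);
    assert(bytes[0] == 0); // leading (lowest-address) byte is zero either way
    return 0;
}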
@@ -345,7 +351,7 @@ static inline void *allocate_small(size_t requested_size) {
             mutex_unlock(&c->lock);
             return NULL;
         }
-        metadata->canary_value = get_random_u64(&c->rng);
+        metadata->canary_value = get_random_u64(&c->rng) & canary_mask;
         c->partial_slabs = metadata;
 
         void *slab = get_slab(c, slab_size, metadata);
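The commit only changes how the per-slab canary value is generated and stored. For context, a hypothetical sketch of the matching check on free, mirroring set_canary above (check_canary and fatal_error are assumptions here, not part of this diff):

// Hypothetical counterpart to set_canary: on free, read the canary stored
// after the allocation and compare it against the slab's expected value.
static void check_canary(struct slab_metadata *metadata, void *p, size_t size) {
    uint64_t canary_value;
    memcpy(&canary_value, (char *)p + size - canary_size, canary_size);
    if (canary_value != metadata->canary_value) {
        fatal_error("canary corrupted");
    }
}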