add sanity check for stats option

pull/87/head
Daniel Micay 2019-04-07 09:06:03 -04:00
parent e0891c8cfc
commit ef90f404a6
3 changed files with 22 additions and 18 deletions

Android.bp

@@ -26,7 +26,7 @@ common_cflags = [
     "-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=32",
     "-DCONFIG_CLASS_REGION_SIZE=1073741824", // 1GiB
     "-DN_ARENA=1",
-    "-DSTATS=false",
+    "-DCONFIG_STATS=false",
 ]
 
 cc_defaults {
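Note that the Android build hard-wires these flags in Android.bp rather than deriving them from the Makefile's CONFIG_* variables, so the STATS → CONFIG_STATS rename has to be applied in both places; the new sanity check below only covers Makefile builds.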

Makefile

@@ -73,6 +73,10 @@ ifeq (,$(filter $(CONFIG_LARGE_SIZE_CLASSES),true false))
     $(error CONFIG_LARGE_SIZE_CLASSES must be true or false)
 endif
 
+ifeq (,$(filter $(CONFIG_STATS),true false))
+    $(error CONFIG_STATS must be true or false)
+endif
+
 CPPFLAGS += \
     -DZERO_ON_FREE=$(CONFIG_ZERO_ON_FREE) \
     -DWRITE_AFTER_FREE_CHECK=$(CONFIG_WRITE_AFTER_FREE_CHECK) \

@@ -89,7 +93,7 @@ CPPFLAGS += \
     -DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=$(CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH) \
     -DCONFIG_CLASS_REGION_SIZE=$(CONFIG_CLASS_REGION_SIZE) \
     -DN_ARENA=$(CONFIG_N_ARENA) \
-    -DSTATS=$(CONFIG_STATS)
+    -DCONFIG_STATS=$(CONFIG_STATS)
 
 libhardened_malloc.so: $(OBJECTS)
 	$(CC) $(CFLAGS) $(LDFLAGS) -shared $^ $(LDLIBS) -o $@
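The new check copies the pattern used for the other boolean options above it: GNU make's $(filter) keeps only the words of "true false" that match the value of $(CONFIG_STATS), so the result is empty for anything other than exactly true or false, and the ifeq then stops the build at parse time via $(error ...). For example, make CONFIG_STATS=1 now fails immediately with "CONFIG_STATS must be true or false" instead of silently passing -DCONFIG_STATS=1 through to the compiler.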

h_malloc.c

@@ -211,7 +211,7 @@ struct __attribute__((aligned(CACHELINE_SIZE))) size_class {
     struct slab_metadata *free_slabs_tail;
     struct slab_metadata *free_slabs_quarantine[FREE_SLABS_QUARANTINE_RANDOM_LENGTH];
 
-#if STATS
+#if CONFIG_STATS
     u64 nmalloc; // may wrap (per jemalloc API)
     u64 ndalloc; // may wrap (per jemalloc API)
     size_t allocated;

@@ -456,7 +456,7 @@ static inline void *allocate_small(size_t requested_size) {
                 set_canary(metadata, p, size);
             }
 
-#if STATS
+#if CONFIG_STATS
             c->allocated += size;
             c->nmalloc++;
 #endif

@@ -473,7 +473,7 @@ static inline void *allocate_small(size_t requested_size) {
                 mutex_unlock(&c->lock);
                 return NULL;
             }
-#if STATS
+#if CONFIG_STATS
             c->slab_allocated += slab_size;
 #endif
 

@@ -494,7 +494,7 @@ static inline void *allocate_small(size_t requested_size) {
                 set_canary(metadata, p, size);
             }
 
-#if STATS
+#if CONFIG_STATS
             c->allocated += size;
             c->nmalloc++;
 #endif

@@ -507,7 +507,7 @@ static inline void *allocate_small(size_t requested_size) {
             mutex_unlock(&c->lock);
             return NULL;
         }
-#if STATS
+#if CONFIG_STATS
         c->slab_allocated += slab_size;
 #endif
         metadata->canary_value = get_random_canary(&c->rng);

@@ -521,7 +521,7 @@ static inline void *allocate_small(size_t requested_size) {
             set_canary(metadata, p, size);
         }
 
-#if STATS
+#if CONFIG_STATS
         c->allocated += size;
         c->nmalloc++;
 #endif

@@ -547,7 +547,7 @@ static inline void *allocate_small(size_t requested_size) {
         set_canary(metadata, p, size);
     }
 
-#if STATS
+#if CONFIG_STATS
     c->allocated += size;
     c->nmalloc++;
 #endif

@@ -611,7 +611,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
     size_t slab_size = get_slab_size(slots, size);
 
     mutex_lock(&c->lock);
-#if STATS
+#if CONFIG_STATS
     c->allocated -= size;
     c->ndalloc++;
 #endif

@@ -715,7 +715,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
         if (c->empty_slabs_total + slab_size > max_empty_slabs_total) {
             if (!memory_map_fixed(slab, slab_size)) {
                 memory_set_name(slab, slab_size, size_class_labels[class]);
-#if STATS
+#if CONFIG_STATS
                 c->slab_allocated -= slab_size;
 #endif
                 enqueue_free_slab(c, metadata);

@@ -752,7 +752,7 @@ struct region_allocator {
     struct region_metadata *regions;
     size_t total;
     size_t free;
-#if STATS
+#if CONFIG_STATS
     size_t allocated;
 #endif
     struct quarantine_info quarantine_random[REGION_QUARANTINE_RANDOM_LENGTH];

@@ -1171,7 +1171,7 @@ static void *allocate_large(size_t size) {
         deallocate_pages(p, size, guard_size);
         return NULL;
     }
-#if STATS
+#if CONFIG_STATS
     ra->allocated += size;
 #endif
     mutex_unlock(&ra->lock);

@@ -1200,7 +1200,7 @@ static void deallocate_large(void *p, const size_t *expected_size) {
     }
     size_t guard_size = region->guard_size;
     regions_delete(region);
-#if STATS
+#if CONFIG_STATS
     ra->allocated -= size;
 #endif
     mutex_unlock(&ra->lock);

@@ -1623,7 +1623,7 @@ EXPORT int h_malloc_trim(UNUSED size_t pad) {
                 break;
             }
             memory_set_name(slab, slab_size, size_class_labels[class]);
-#if STATS
+#if CONFIG_STATS
             c->slab_allocated -= slab_size;
 #endif
 

@@ -1652,7 +1652,7 @@ EXPORT struct mallinfo h_mallinfo(void) {
     struct mallinfo info = {0};
 
     // glibc mallinfo type definition and implementation are both broken
-#if STATS && !defined(__GLIBC__)
+#if CONFIG_STATS && !defined(__GLIBC__)
     struct region_allocator *ra = ro.region_allocator;
     mutex_lock(&ra->lock);
     info.hblkhd += ra->allocated;

@@ -1713,7 +1713,7 @@ EXPORT size_t __mallinfo_nbins(void) {
 
 EXPORT struct mallinfo __mallinfo_arena_info(UNUSED size_t arena) {
     struct mallinfo info = {0};
-#if STATS
+#if CONFIG_STATS
     if (arena < N_ARENA) {
         for (unsigned class = 0; class < N_SIZE_CLASSES; class++) {
             struct size_class *c = &ro.size_class_metadata[arena][class];

@@ -1745,7 +1745,7 @@ EXPORT struct mallinfo __mallinfo_arena_info(UNUSED size_t arena) {
 
 EXPORT struct mallinfo __mallinfo_bin_info(UNUSED size_t arena, UNUSED size_t bin) {
     struct mallinfo info = {0};
-#if STATS
+#if CONFIG_STATS
     if (arena < N_ARENA && bin < N_SIZE_CLASSES) {
         struct size_class *c = &ro.size_class_metadata[arena][bin];
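For reference, a minimal sketch of how these counters can be read once the option is enabled. It assumes a build with CONFIG_STATS=true and a program linked against hardened_malloc; the __mallinfo_arena_info declaration is copied from the diff above, but treating hblkhd as slab bytes and uordblks as in-use bytes is an assumption for illustration, since the fields that function fills are not shown in this excerpt.

    /* Sketch: read the per-arena stats gated behind CONFIG_STATS.
     * With -DCONFIG_STATS=false the counters compile out and the
     * returned struct stays zeroed. */
    #include <malloc.h> /* struct mallinfo */
    #include <stdio.h>
    #include <stdlib.h>

    /* extension entry point as declared in the diff above; normally
     * provided by the platform headers on Android */
    struct mallinfo __mallinfo_arena_info(size_t arena);

    int main(void) {
        void *p = malloc(64); /* make arena 0 do some slab work */
        struct mallinfo m = __mallinfo_arena_info(0);
        /* field interpretation is assumed, see note above */
        printf("arena 0: hblkhd=%zu uordblks=%zu\n",
               (size_t)m.hblkhd, (size_t)m.uordblks);
        free(p);
        return 0;
    }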