limit cached slabs based on max size class

pull/109/head
Daniel Micay 2020-05-13 01:05:37 -04:00
parent cf55ac0f6d
commit 4a6bbe445c
1 changed file with 12 additions and 12 deletions

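This renames the max_slab_size_class constant to MAX_SLAB_SIZE_CLASS and derives the limit on cached empty slabs from it: max_empty_slabs_total becomes MAX_SLAB_SIZE_CLASS instead of the hard-coded 128 * 1024. The limit is unchanged (131072 bytes) when CONFIG_EXTENDED_SIZE_CLASSES is enabled and drops to 16384 bytes otherwise, so the amount of memory retained as cached empty slabs now tracks the configured maximum slab size class.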

@@ -108,10 +108,10 @@ static const size_t min_align = 16;
 #define MIN_SLAB_SIZE_CLASS_SHIFT 4
 
 #if !CONFIG_EXTENDED_SIZE_CLASSES
-static const size_t max_slab_size_class = 16384;
+static const size_t MAX_SLAB_SIZE_CLASS = 16384;
 #define MAX_SLAB_SIZE_CLASS_SHIFT 14
 #else
-static const size_t max_slab_size_class = 131072;
+static const size_t MAX_SLAB_SIZE_CLASS = 131072;
 #define MAX_SLAB_SIZE_CLASS_SHIFT 17
 #endif
@@ -210,7 +210,7 @@ static size_t get_slab_size(size_t slots, size_t size) {
 }
 
 // limit on the number of cached empty slabs before attempting purging instead
-static const size_t max_empty_slabs_total = 128 * 1024;
+static const size_t max_empty_slabs_total = MAX_SLAB_SIZE_CLASS;
 
 struct __attribute__((aligned(CACHELINE_SIZE))) size_class {
     struct mutex lock;
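For context, the hunk above changes only the value of max_empty_slabs_total; the code that consults it lives elsewhere in the file and is not part of this diff. A minimal, hypothetical sketch of the semantics the cap enforces (the struct layout and function names below are invented for illustration, not the allocator's actual code):

/* Illustrative sketch only -- not the allocator's real free path. It models
 * the cap described by the comment above: emptied slabs are kept cached for
 * reuse until the cached total would exceed max_empty_slabs_total, at which
 * point purging is attempted instead. All names here are hypothetical. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const size_t max_empty_slabs_total = 131072; /* MAX_SLAB_SIZE_CLASS with extended size classes */

struct empty_slab_cache {
    size_t cached_bytes; /* total bytes of empty slabs currently kept mapped */
};

/* true: keep the emptied slab cached for reuse; false: purge its pages instead */
static bool should_cache_empty_slab(const struct empty_slab_cache *cache, size_t slab_size) {
    return cache->cached_bytes + slab_size <= max_empty_slabs_total;
}

int main(void) {
    struct empty_slab_cache cache = { .cached_bytes = 120 * 1024 };
    /* 120 KiB already cached plus a 16 KiB slab exceeds the 128 KiB cap, so purge */
    printf("cache the slab? %s\n", should_cache_empty_slab(&cache, 16384) ? "yes" : "no (purge)");
    return 0;
}

With the old hard-coded value the cap was already 131072 bytes, so this change only lowers it for builds without extended size classes, where the largest slab size class is 16384 bytes.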
@@ -1216,7 +1216,7 @@ static void *allocate_large(size_t size) {
 }
 
 static inline void *allocate(unsigned arena, size_t size) {
-    return size <= max_slab_size_class ? allocate_small(arena, size) : allocate_large(size);
+    return size <= MAX_SLAB_SIZE_CLASS ? allocate_small(arena, size) : allocate_large(size);
 }
 
 static void deallocate_large(void *p, const size_t *expected_size) {
@@ -1248,7 +1248,7 @@ static int alloc_aligned(unsigned arena, void **memptr, size_t alignment, size_t
     }
 
     if (alignment <= PAGE_SIZE) {
-        if (size <= max_slab_size_class && alignment > min_align) {
+        if (size <= MAX_SLAB_SIZE_CLASS && alignment > min_align) {
             size = get_size_info_align(size, alignment).size;
         }
@@ -1299,7 +1299,7 @@ static void *alloc_aligned_simple(unsigned arena, size_t alignment, size_t size)
 }
 
 static size_t adjust_size_for_canaries(size_t size) {
-    if (size > 0 && size <= max_slab_size_class) {
+    if (size > 0 && size <= MAX_SLAB_SIZE_CLASS) {
         return size + canary_size;
     }
     return size;
@@ -1329,7 +1329,7 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     total_size = adjust_size_for_canaries(total_size);
     void *p = allocate(arena, total_size);
     thread_seal_metadata();
-    if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
+    if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= MAX_SLAB_SIZE_CLASS) {
         memset(p, 0, total_size - canary_size);
     }
     return p;
@@ -1342,7 +1342,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
     size = adjust_size_for_canaries(size);
 
-    if (size > max_slab_size_class) {
+    if (size > MAX_SLAB_SIZE_CLASS) {
         size = get_large_size_class(size);
         if (unlikely(!size)) {
             errno = ENOMEM;
@@ -1353,7 +1353,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
     size_t old_size;
     if (old >= get_slab_region_start() && old < ro.slab_region_end) {
         old_size = slab_usable_size(old);
-        if (size <= max_slab_size_class && get_size_info(size).size == old_size) {
+        if (size <= MAX_SLAB_SIZE_CLASS && get_size_info(size).size == old_size) {
             return old;
         }
         thread_unseal_metadata();
@@ -1377,7 +1377,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
         }
         mutex_unlock(&ra->lock);
 
-        if (size > max_slab_size_class) {
+        if (size > MAX_SLAB_SIZE_CLASS) {
             // in-place shrink
             if (size < old_size) {
                 void *new_end = (char *)old + size;
@@ -1461,11 +1461,11 @@ EXPORT void *h_realloc(void *old, size_t size) {
         return NULL;
     }
     size_t copy_size = min(size, old_size);
-    if (copy_size > 0 && copy_size <= max_slab_size_class) {
+    if (copy_size > 0 && copy_size <= MAX_SLAB_SIZE_CLASS) {
         copy_size -= canary_size;
     }
     memcpy(new, old, copy_size);
-    if (old_size <= max_slab_size_class) {
+    if (old_size <= MAX_SLAB_SIZE_CLASS) {
         deallocate_small(old, NULL);
     } else {
         deallocate_large(old, NULL);