avoid overhead of init check for slab deallocation

pull/50/head
Daniel Micay 2018-09-11 14:13:18 -04:00
parent ef098fea06
commit 684291bf6a
1 changed file with 27 additions and 13 deletions

@@ -748,11 +748,8 @@ static void *allocate(size_t size) {
     return p;
 }
 
-static void deallocate(void *p) {
-    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
-        slab_free(p);
-        return;
-    }
+static void deallocate_large(void *p) {
+    enforce_init();
 
     mutex_lock(&regions_lock);
     struct region_info *region = regions_find(p);
@@ -810,7 +807,6 @@ EXPORT void *h_realloc(void *old, size_t size) {
         return allocate(size);
     }
 
-    enforce_init();
     size = adjust_size_for_canaries(size);
 
     size_t old_size;
@@ -820,6 +816,8 @@ EXPORT void *h_realloc(void *old, size_t size) {
             return old;
         }
     } else {
+        enforce_init();
+
         mutex_lock(&regions_lock);
         struct region_info *region = regions_find(old);
         if (region == NULL) {
@@ -892,7 +890,11 @@ EXPORT void *h_realloc(void *old, size_t size) {
         copy_size -= canary_size;
     }
     memcpy(new, old, copy_size);
-    deallocate(old);
+    if (old_size <= max_slab_size_class) {
+        slab_free(old);
+    } else {
+        deallocate_large(old);
+    }
     return new;
 }
 
@@ -981,8 +983,12 @@ EXPORT void h_free(void *p) {
         return;
     }
 
-    enforce_init();
-    deallocate(p);
+    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
+        slab_free(p);
+        return;
+    }
+
+    deallocate_large(p);
 }
 
 EXPORT void h_cfree(void *ptr) ALIAS(h_free);
@@ -992,13 +998,13 @@ EXPORT size_t h_malloc_usable_size(void *p) {
         return 0;
     }
 
-    enforce_init();
-
     if (p >= ro.slab_region_start && p < ro.slab_region_end) {
         size_t size = slab_usable_size(p);
         return size ? size - canary_size : 0;
     }
 
+    enforce_init();
+
     mutex_lock(&regions_lock);
     struct region_info *region = regions_find(p);
     if (p == NULL) {
@@ -1011,7 +1017,7 @@ EXPORT size_t h_malloc_usable_size(void *p) {
 }
 
 EXPORT size_t h_malloc_object_size(void *p) {
-    if (p == NULL || unlikely(!is_init())) {
+    if (p == NULL) {
         return 0;
     }
 
@@ -1020,6 +1026,10 @@ EXPORT size_t h_malloc_object_size(void *p) {
         return size ? size - canary_size : 0;
     }
 
+    if (unlikely(!is_init())) {
+        return 0;
+    }
+
     mutex_lock(&regions_lock);
     struct region_info *region = regions_find(p);
     size_t size = p == NULL ? SIZE_MAX : region->size;
@@ -1029,7 +1039,7 @@ EXPORT size_t h_malloc_object_size(void *p) {
 }
 
 EXPORT size_t h_malloc_object_size_fast(void *p) {
-    if (p == NULL || unlikely(!is_init())) {
+    if (p == NULL) {
         return 0;
     }
 
@@ -1038,6 +1048,10 @@ EXPORT size_t h_malloc_object_size_fast(void *p) {
         return size ? size - canary_size : 0;
     }
 
+    if (unlikely(!is_init())) {
+        return 0;
+    }
+
     return SIZE_MAX;
 }
 
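
For context, the following is a minimal self-contained sketch, not code from this commit, of why the slab fast path can drop the initialization check: the slab region bounds in ro are only set up during initialization, so (assuming they start out NULL, i.e. an empty region) the range check can never match a real pointer beforehand, and any early call still falls through to deallocate_large(), which keeps enforce_init(). The names mirror the diff, but the `initialized` flag and the stub bodies are assumptions for illustration; only the control flow follows the change above.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the allocator's state; names mirror the diff, bodies are stubs. */
static bool initialized;
static struct {
    void *slab_region_start;
    void *slab_region_end; /* both NULL until initialization maps the slab region */
} ro;

static void enforce_init(void) {
    if (!initialized) {
        fprintf(stderr, "allocator used before initialization\n");
        abort();
    }
}

static void slab_free(void *p) {
    /* Fast path: p was already proven to lie inside the slab region, which
       can only hold allocations once the allocator is initialized, so no
       separate init check is needed here. */
    (void)p;
}

static void deallocate_large(void *p) {
    enforce_init(); /* slow path keeps the explicit check */
    (void)p;
}

static void free_sketch(void *p) {
    if (p == NULL) {
        return;
    }
    /* With both bounds NULL before initialization, no non-NULL pointer can
       satisfy this range check, so such calls fall through to the slow path
       and still hit enforce_init(). */
    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        slab_free(p);
        return;
    }
    deallocate_large(p);
}

int main(void) {
    free_sketch(NULL); /* no-op; any non-NULL pointer would abort here,
                          since these stubs never set `initialized` */
    return 0;
}

The same reasoning applies to the query functions in the diff: h_malloc_usable_size, h_malloc_object_size, and h_malloc_object_size_fast answer slab-region queries first and defer the enforce_init()/is_init() check to the paths that consult the regions table.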