add initial malloc_object_size extensions

pull/50/head
Daniel Micay 2018-08-29 13:43:35 -04:00
parent 00b2613e16
commit 2684a98eab
2 changed files with 42 additions and 1 deletion


@@ -667,6 +667,10 @@ static inline void enforce_init(void) {
    }
}

static inline bool is_init(void) {
    return atomic_load_explicit(&ro.initialized, memory_order_acquire);
}

static size_t get_guard_size(struct random_state *state, size_t size) {
    return (get_random_u64_uniform(state, size / PAGE_SIZE / 8) + 1) * PAGE_SIZE;
}
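
(Aside, not part of the commit: a worked example of the guard sizing in get_guard_size above, assuming get_random_u64_uniform(state, bound) returns a uniform value in [0, bound). For a 1 MiB mapping with 4 KiB pages the guard works out to between 1 and 32 pages:)

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void) {
    size_t size = 1024 * 1024; // a 1 MiB mapping
    uint64_t bound = size / PAGE_SIZE / 8; // 1048576 / 4096 / 8 = 32
    // get_guard_size maps a random value in [0, bound) to [1, bound] pages
    size_t min_guard = (0 + 1) * PAGE_SIZE;           // 4096 bytes
    size_t max_guard = ((bound - 1) + 1) * PAGE_SIZE; // 131072 bytes
    printf("guard range: %zu to %zu bytes\n", min_guard, max_guard);
    return 0;
}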
@@ -918,6 +922,35 @@ EXPORT size_t h_malloc_usable_size(void *p) {
    return size;
}

EXPORT size_t h_malloc_object_size(void *p) {
    if (p == NULL || !is_init()) {
        return 0;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        return slab_usable_size(p);
    }

    pthread_mutex_lock(&regions_lock);
    struct region_info *region = regions_find(p);
    size_t size = region == NULL ? SIZE_MAX : region->size;
    pthread_mutex_unlock(&regions_lock);
    return size;
}

EXPORT size_t h_malloc_object_size_fast(void *p) {
    if (p == NULL || !is_init()) {
        return 0;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        return slab_usable_size(p);
    }

    return SIZE_MAX;
}

EXPORT int h_mallopt(UNUSED int param, UNUSED int value) {
    return 0;
}
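
(For context, a minimal caller-side sketch of what these extensions enable, not part of the commit: a _FORTIFY_SOURCE-style overflow check that treats the SIZE_MAX "unknown" result as permissive, so it never rejects a valid copy. The unprefixed malloc_object_size name assumes the standalone build maps the h_ wrappers onto the standard symbols, as with the other functions in this file.)

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

size_t malloc_object_size(void *ptr); // exported by the allocator

// Abort if malloc metadata proves dest cannot hold n bytes. Because
// malloc_object_size returns an upper bound (and SIZE_MAX when the
// pointer is unknown to the allocator), there are no false positives.
static void checked_copy(void *dest, const void *src, size_t n) {
    if (n > malloc_object_size(dest)) {
        abort();
    }
    memcpy(dest, src, n);
}

int main(void) {
    char *buf = malloc(32);
    checked_copy(buf, "hello", 6); // fine: 6 <= usable size of buf
    free(buf);
    return 0;
}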
@@ -929,7 +962,7 @@ EXPORT int h_malloc_trim(size_t pad) {
        return 0;
    }

-    if (!atomic_load_explicit(&ro.initialized, memory_order_acquire)) {
+    if (!is_init()) {
        return 0;
    }


@@ -48,4 +48,12 @@ void *h_valloc(size_t size);
void *h_pvalloc(size_t size);
void h_cfree(void *ptr);
// custom extensions

// return an upper bound on object size for any pointer based on malloc metadata
size_t h_malloc_object_size(void *ptr);

// similar to malloc_object_size, but avoids locking, so the results are much more limited
size_t h_malloc_object_size_fast(void *ptr);
#endif
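
(A hypothetical demonstration of the contract described by these comments, again not part of the commit; the 256 KiB allocation is assumed to be large enough to bypass the slab region. Both variants resolve slab allocations, but only the locking variant can resolve large mappings; the fast variant falls back to the SIZE_MAX sentinel.)

#include <stdio.h>
#include <stdlib.h>

size_t malloc_object_size(void *ptr);
size_t malloc_object_size_fast(void *ptr);

int main(void) {
    void *small = malloc(16);         // served from the slab region
    void *large = malloc(256 * 1024); // assumed to be a large mapping

    // Slab pointers are resolved by both variants, without locking.
    printf("small: %zu / %zu\n",
           malloc_object_size(small), malloc_object_size_fast(small));

    // The fast variant cannot consult the locked region table, so it
    // reports the SIZE_MAX "unknown" sentinel for large allocations.
    printf("large: %zu / %zu (SIZE_MAX means unknown)\n",
           malloc_object_size(large), malloc_object_size_fast(large));

    free(small);
    free(large);
    return 0;
}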