move stats accounting to utility functions
parent 02bfcc3b75
commit 9a0de626fc

h_malloc.c (88 changed lines)
@@ -425,6 +425,44 @@ static u64 get_random_canary(struct random_state *rng) {
     return get_random_u64(rng) & canary_mask;
 }
 
+static inline void stats_small_allocate(UNUSED struct size_class *c, UNUSED size_t size) {
+#if CONFIG_STATS
+    c->allocated += size;
+    c->nmalloc++;
+#endif
+}
+
+static inline void stats_small_deallocate(UNUSED struct size_class *c, UNUSED size_t size) {
+#if CONFIG_STATS
+    c->allocated -= size;
+    c->ndalloc++;
+#endif
+}
+
+static inline void stats_slab_allocate(UNUSED struct size_class *c, UNUSED size_t slab_size) {
+#if CONFIG_STATS
+    c->slab_allocated += slab_size;
+#endif
+}
+
+static inline void stats_slab_deallocate(UNUSED struct size_class *c, UNUSED size_t slab_size) {
+#if CONFIG_STATS
+    c->slab_allocated -= slab_size;
+#endif
+}
+
+static inline void stats_large_allocate(UNUSED struct region_allocator *ra, UNUSED size_t size) {
+#if CONFIG_STATS
+    ra->allocated += size;
+#endif
+}
+
+static inline void stats_large_deallocate(UNUSED struct region_allocator *ra, UNUSED size_t size) {
+#if CONFIG_STATS
+    ra->allocated -= size;
+#endif
+}
+
 static inline void *allocate_small(size_t requested_size) {
     struct size_info info = get_size_info(requested_size);
     size_t size = info.size ? info.size : 16;
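The new helpers all follow one pattern: the parameters are annotated UNUSED so the build with stats disabled compiles without unused-parameter warnings, and call sites stay unconditional because an empty inline body compiles away to nothing. A minimal standalone sketch of that pattern (the struct layout and the UNUSED definition here are simplified stand-ins, not the allocator's real ones):

```c
#include <stdio.h>
#include <stddef.h>

/* Build with -DCONFIG_STATS=1 to enable the counters; default off. */
#ifndef CONFIG_STATS
#define CONFIG_STATS 0
#endif

/* Simplified stand-in for the allocator's UNUSED annotation. */
#define UNUSED __attribute__((unused))

/* Reduced size_class carrying only the stats fields. */
struct size_class {
    size_t allocated;
    size_t nmalloc;
};

/* When CONFIG_STATS is 0 the body is empty and the parameters go
 * unused (hence UNUSED); the inline call compiles to nothing, so
 * callers never need #if guards of their own. */
static inline void stats_small_allocate(UNUSED struct size_class *c, UNUSED size_t size) {
#if CONFIG_STATS
    c->allocated += size;
    c->nmalloc++;
#endif
}

int main(void) {
    struct size_class c = {0};
    stats_small_allocate(&c, 32);
    printf("allocated=%zu nmalloc=%zu\n", c.allocated, c.nmalloc);
    return 0;
}
```

Built with `cc -DCONFIG_STATS=1 sketch.c`, this prints allocated=32 nmalloc=1; without the flag both counters stay 0.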
@@ -460,11 +498,8 @@ static inline void *allocate_small(size_t requested_size) {
                 write_after_free_check(p, size - canary_size);
                 set_canary(metadata, p, size);
             }
-#if CONFIG_STATS
-            c->allocated += size;
-            c->nmalloc++;
-#endif
+            stats_small_allocate(c, size);
 
             mutex_unlock(&c->lock);
             return p;
         }
@@ -478,9 +513,6 @@ static inline void *allocate_small(size_t requested_size) {
                 mutex_unlock(&c->lock);
                 return NULL;
             }
-#if CONFIG_STATS
-            c->slab_allocated += slab_size;
-#endif
 
             c->free_slabs_head = c->free_slabs_head->next;
             if (c->free_slabs_head == NULL) {
@@ -498,11 +530,9 @@ static inline void *allocate_small(size_t requested_size) {
             if (requested_size) {
                 set_canary(metadata, p, size);
             }
-#if CONFIG_STATS
-            c->allocated += size;
-            c->nmalloc++;
-#endif
+            stats_slab_allocate(c, slab_size);
+            stats_small_allocate(c, size);
 
             mutex_unlock(&c->lock);
             return p;
         }
@@ -512,9 +542,6 @@ static inline void *allocate_small(size_t requested_size) {
             mutex_unlock(&c->lock);
             return NULL;
         }
-#if CONFIG_STATS
-        c->slab_allocated += slab_size;
-#endif
         metadata->canary_value = get_random_canary(&c->rng);
 
         c->partial_slabs = metadata;
@@ -525,11 +552,9 @@ static inline void *allocate_small(size_t requested_size) {
         if (requested_size) {
             set_canary(metadata, p, size);
         }
-#if CONFIG_STATS
-        c->allocated += size;
-        c->nmalloc++;
-#endif
+        stats_slab_allocate(c, slab_size);
+        stats_small_allocate(c, size);
 
         mutex_unlock(&c->lock);
         return p;
     }
@@ -551,11 +576,8 @@ static inline void *allocate_small(size_t requested_size) {
         write_after_free_check(p, size - canary_size);
         set_canary(metadata, p, size);
     }
-#if CONFIG_STATS
-    c->allocated += size;
-    c->nmalloc++;
-#endif
+    stats_small_allocate(c, size);
 
     mutex_unlock(&c->lock);
     return p;
 }
@@ -616,10 +638,8 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
     size_t slab_size = get_slab_size(slots, size);
 
     mutex_lock(&c->lock);
-#if CONFIG_STATS
-    c->allocated -= size;
-    c->ndalloc++;
-#endif
+    stats_small_deallocate(c, size);
 
     struct slab_metadata *metadata = get_metadata(c, p);
     void *slab = get_slab(c, slab_size, metadata);
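With this hunk the accounting becomes symmetric: stats_small_allocate and stats_small_deallocate adjust the same counters under the same per-class lock. Since every slot in a size class has one fixed size, the counters keep a simple invariant while c->lock is held: net bytes equal the net allocation count times the slot size. A small self-contained check of that invariant (plain structs and no locking, since a single thread exercises it; the real helpers are additionally guarded by CONFIG_STATS):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Reduced per-class counters, mirroring the fields the helpers touch. */
struct size_class {
    size_t allocated;   /* net live bytes from this class */
    size_t nmalloc;     /* cumulative allocations */
    size_t ndalloc;     /* cumulative deallocations */
};

static void stats_small_allocate(struct size_class *c, size_t size) {
    c->allocated += size;
    c->nmalloc++;
}

static void stats_small_deallocate(struct size_class *c, size_t size) {
    c->allocated -= size;
    c->ndalloc++;
}

int main(void) {
    struct size_class c = {0};
    const size_t slot_size = 16;   /* one fixed size per class */

    stats_small_allocate(&c, slot_size);
    stats_small_allocate(&c, slot_size);
    stats_small_deallocate(&c, slot_size);

    /* Net bytes == net allocation count * slot size. */
    assert(c.allocated == (c.nmalloc - c.ndalloc) * slot_size);
    printf("live=%zu bytes across %zu slots\n",
           c.allocated, c.nmalloc - c.ndalloc);
    return 0;
}
```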
@@ -720,9 +740,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
     if (c->empty_slabs_total + slab_size > max_empty_slabs_total) {
         if (!memory_map_fixed(slab, slab_size)) {
             label_slab(slab, slab_size, class);
-#if CONFIG_STATS
-            c->slab_allocated -= slab_size;
-#endif
+            stats_slab_deallocate(c, slab_size);
             enqueue_free_slab(c, metadata);
             mutex_unlock(&c->lock);
             return;
@@ -1175,9 +1193,7 @@ static void *allocate_large(size_t size) {
         deallocate_pages(p, size, guard_size);
         return NULL;
     }
-#if CONFIG_STATS
-    ra->allocated += size;
-#endif
+    stats_large_allocate(ra, size);
     mutex_unlock(&ra->lock);
 
     return p;
@@ -1204,9 +1220,7 @@ static void deallocate_large(void *p, const size_t *expected_size) {
     }
     size_t guard_size = region->guard_size;
     regions_delete(region);
-#if CONFIG_STATS
-    ra->allocated -= size;
-#endif
+    stats_large_deallocate(ra, size);
     mutex_unlock(&ra->lock);
 
     regions_quarantine_deallocate_pages(p, size, guard_size);
@@ -1627,9 +1641,7 @@ EXPORT int h_malloc_trim(UNUSED size_t pad) {
                 break;
             }
             label_slab(slab, slab_size, class);
-#if CONFIG_STATS
-            c->slab_allocated -= slab_size;
-#endif
+            stats_slab_deallocate(c, slab_size);
 
             struct slab_metadata *trimmed = iterator;
             iterator = iterator->next;
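A side benefit of funneling every update through these helpers is that any reporting path only has to read a handful of counters, each consistent under its own lock: per-class allocated/slab_allocated under c->lock, and the large-region total under ra->lock. A hypothetical aggregation sketch (struct layout, class count, and function name are invented for illustration; this is not the allocator's actual reporting code):

```c
#include <pthread.h>
#include <stddef.h>

#define N_SIZE_CLASSES 4    /* placeholder count for this sketch */

struct size_class {
    pthread_mutex_t lock;
    size_t allocated;       /* net live bytes in this class */
};

struct region_allocator {
    pthread_mutex_t lock;
    size_t allocated;       /* net live bytes in large regions */
};

/* Sum live bytes across every small size class and the large-region
 * allocator. Each counter is read under its own lock so a concurrent
 * malloc/free is never observed half-applied; the classes are not all
 * locked at once, so this is a per-counter-consistent snapshot. */
static size_t total_allocated(struct size_class *classes,
                              struct region_allocator *ra) {
    size_t total = 0;
    for (size_t i = 0; i < N_SIZE_CLASSES; i++) {
        pthread_mutex_lock(&classes[i].lock);
        total += classes[i].allocated;
        pthread_mutex_unlock(&classes[i].lock);
    }
    pthread_mutex_lock(&ra->lock);
    total += ra->allocated;
    pthread_mutex_unlock(&ra->lock);
    return total;
}

int main(void) {
    struct size_class classes[N_SIZE_CLASSES];
    struct region_allocator ra;
    for (size_t i = 0; i < N_SIZE_CLASSES; i++) {
        pthread_mutex_init(&classes[i].lock, NULL);
        classes[i].allocated = i * 16;      /* 0 + 16 + 32 + 48 = 96 */
    }
    pthread_mutex_init(&ra.lock, NULL);
    ra.allocated = 4096;
    return total_allocated(classes, &ra) == 4096 + 96 ? 0 : 1;
}
```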