2018-08-21 21:23:22 +02:00
|
|
|
#include <assert.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <stdatomic.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <pthread.h>
|
|
|
|
|
2018-08-30 09:03:19 +02:00
|
|
|
#include "third_party/libdivide.h"
|
2018-08-27 12:57:44 +02:00
|
|
|
|
2018-09-07 06:17:22 +02:00
|
|
|
#include "config.h"
|
2018-08-21 21:23:22 +02:00
|
|
|
#include "malloc.h"
|
2018-09-07 07:08:51 +02:00
|
|
|
#include "mutex.h"
|
2018-08-29 06:53:12 +02:00
|
|
|
#include "memory.h"
|
2018-09-02 08:03:27 +02:00
|
|
|
#include "pages.h"
|
2018-08-21 21:23:22 +02:00
|
|
|
#include "random.h"
|
|
|
|
#include "util.h"
|
|
|
|
|
2018-10-12 22:02:23 +02:00
|
|
|
// use __register_atfork directly to avoid linking with libpthread for glibc < 2.28
|
|
|
|
#ifdef __GLIBC__
|
|
|
|
extern void *__dso_handle;
|
|
|
|
extern int __register_atfork(void (*)(void), void (*)(void), void (*)(void), void *);
|
|
|
|
#define atfork(prepare, parent, child) __register_atfork(prepare, parent, child, __dso_handle)
|
|
|
|
#else
|
|
|
|
#define atfork pthread_atfork
|
|
|
|
#endif
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
static_assert(sizeof(void *) == 8, "64-bit only");
|
|
|
|
|
2018-09-07 06:33:51 +02:00
|
|
|
static_assert(!WRITE_AFTER_FREE_CHECK || ZERO_ON_FREE, "WRITE_AFTER_FREE_CHECK depends on ZERO_ON_FREE");
|
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
// either sizeof(u64) or 0
|
|
|
|
static const size_t canary_size = SLAB_CANARY ? sizeof(u64) : 0;
|
2018-09-02 14:36:48 +02:00
|
|
|
|
2018-09-01 05:10:26 +02:00
|
|
|
#define CACHELINE_SIZE 64
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
static union {
|
|
|
|
struct {
|
|
|
|
void *slab_region_start;
|
|
|
|
void *slab_region_end;
|
2018-09-01 16:20:23 +02:00
|
|
|
struct region_info *regions[2];
|
2018-08-21 21:23:22 +02:00
|
|
|
atomic_bool initialized;
|
|
|
|
};
|
|
|
|
char padding[PAGE_SIZE];
|
2018-10-13 22:49:06 +02:00
|
|
|
} ro __attribute__((aligned(PAGE_SIZE)));
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
struct slab_metadata {
|
2018-10-07 19:34:52 +02:00
|
|
|
u64 bitmap[4];
|
2018-08-21 21:23:22 +02:00
|
|
|
struct slab_metadata *next;
|
|
|
|
struct slab_metadata *prev;
|
2018-10-04 20:25:16 +02:00
|
|
|
u64 canary_value;
|
2018-08-21 21:23:22 +02:00
|
|
|
};
|
|
|
|
|
2018-09-10 23:42:58 +02:00
|
|
|
static const size_t min_align = 16;
|
2018-08-21 21:23:22 +02:00
|
|
|
static const size_t max_slab_size_class = 16384;
|
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
static const u16 size_classes[] = {
|
2018-08-24 09:22:52 +02:00
|
|
|
/* 0 */ 0,
|
2018-08-21 21:23:22 +02:00
|
|
|
/* 16 */ 16, 32, 48, 64, 80, 96, 112, 128,
|
|
|
|
/* 32 */ 160, 192, 224, 256,
|
|
|
|
/* 64 */ 320, 384, 448, 512,
|
|
|
|
/* 128 */ 640, 768, 896, 1024,
|
|
|
|
/* 256 */ 1280, 1536, 1792, 2048,
|
|
|
|
/* 512 */ 2560, 3072, 3584, 4096,
|
|
|
|
/* 1024 */ 5120, 6144, 7168, 8192,
|
|
|
|
/* 2048 */ 10240, 12288, 14336, 16384
|
|
|
|
};
|
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
static const u16 size_class_slots[] = {
|
2018-08-24 09:22:52 +02:00
|
|
|
/* 0 */ 256,
|
2018-08-23 23:53:10 +02:00
|
|
|
/* 16 */ 256, 128, 85, 64, 51, 42, 36, 64,
|
2018-08-23 23:42:17 +02:00
|
|
|
/* 32 */ 51, 64, 54, 64,
|
2018-08-21 21:23:22 +02:00
|
|
|
/* 64 */ 64, 64, 64, 64,
|
|
|
|
/* 128 */ 64, 64, 64, 64,
|
|
|
|
/* 256 */ 16, 16, 16, 16,
|
|
|
|
/* 512 */ 8, 8, 8, 8,
|
|
|
|
/* 1024 */ 8, 8, 8, 8,
|
2018-08-23 23:57:09 +02:00
|
|
|
/* 2048 */ 6, 5, 4, 4
|
2018-08-21 21:23:22 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
#define N_SIZE_CLASSES (sizeof(size_classes) / sizeof(size_classes[0]))
|
|
|
|
|
|
|
|
struct size_info {
|
|
|
|
size_t size;
|
|
|
|
size_t class;
|
|
|
|
};
|
|
|
|
|
2018-08-29 21:13:53 +02:00
|
|
|
static inline struct size_info get_size_info(size_t size) {
|
2018-08-24 09:22:52 +02:00
|
|
|
if (size == 0) {
|
2018-09-11 20:51:36 +02:00
|
|
|
return (struct size_info){0, 0};
|
2018-08-24 09:22:52 +02:00
|
|
|
}
|
2018-09-11 20:44:15 +02:00
|
|
|
if (size <= 128) {
|
|
|
|
return (struct size_info){(size + 15) & ~15, ((size - 1) >> 4) + 1};
|
|
|
|
}
|
|
|
|
for (unsigned class = 9; class < N_SIZE_CLASSES; class++) {
|
2018-08-30 13:13:18 +02:00
|
|
|
size_t real_size = size_classes[class];
|
2018-08-21 21:23:22 +02:00
|
|
|
if (size <= real_size) {
|
2018-08-30 13:13:18 +02:00
|
|
|
return (struct size_info){real_size, class};
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
fatal_error("invalid size for slabs");
|
|
|
|
}
|
|
|
|
|
2018-09-10 23:42:58 +02:00
|
|
|
// alignment must be a power of 2 <= PAGE_SIZE since slabs are only page aligned
|
|
|
|
static inline struct size_info get_size_info_align(size_t size, size_t alignment) {
|
|
|
|
for (unsigned class = 1; class < N_SIZE_CLASSES; class++) {
|
|
|
|
size_t real_size = size_classes[class];
|
|
|
|
if (size <= real_size && !(real_size & (alignment - 1))) {
|
|
|
|
return (struct size_info){real_size, class};
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fatal_error("invalid size for slabs");
|
|
|
|
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// Total bytes used by one slab: slots * slot size, rounded up to a page.
static size_t get_slab_size(size_t slots, size_t size) {
    size_t raw = slots * size;
    return PAGE_CEILING(raw);
}
|
|
|
|
|
2018-09-01 04:42:34 +02:00
|
|
|
// limit on the number of cached empty slabs before attempting purging instead
|
2018-08-30 12:44:58 +02:00
|
|
|
static const size_t max_empty_slabs_total = 64 * 1024;
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
static struct size_class {
|
2018-09-07 07:08:51 +02:00
|
|
|
struct mutex lock;
|
2018-10-14 21:09:55 +02:00
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
void *class_region_start;
|
2018-08-27 11:16:47 +02:00
|
|
|
struct slab_metadata *slab_info;
|
2018-10-14 21:09:55 +02:00
|
|
|
struct libdivide_u32_t size_divisor;
|
|
|
|
struct libdivide_u64_t slab_size_divisor;
|
2018-09-01 04:42:34 +02:00
|
|
|
|
|
|
|
// slabs with at least one allocated slot and at least one free slot
|
|
|
|
//
|
|
|
|
// LIFO doubly-linked list
|
2018-08-21 21:23:22 +02:00
|
|
|
struct slab_metadata *partial_slabs;
|
2018-09-01 04:42:34 +02:00
|
|
|
|
|
|
|
// slabs without allocated slots that are cached for near-term usage
|
|
|
|
//
|
|
|
|
// LIFO singly-linked list
|
2018-08-30 10:55:39 +02:00
|
|
|
struct slab_metadata *empty_slabs;
|
2018-09-01 04:42:34 +02:00
|
|
|
size_t empty_slabs_total; // length * slab_size
|
|
|
|
|
|
|
|
// slabs without allocated slots that are purged and memory protected
|
|
|
|
//
|
|
|
|
// FIFO singly-linked list
|
|
|
|
struct slab_metadata *free_slabs_head;
|
|
|
|
struct slab_metadata *free_slabs_tail;
|
2018-10-13 19:55:44 +02:00
|
|
|
struct slab_metadata *free_slabs_quarantine[FREE_SLABS_QUARANTINE_RANDOM_SIZE];
|
2018-09-01 04:42:34 +02:00
|
|
|
|
2018-08-25 06:21:02 +02:00
|
|
|
struct random_state rng;
|
2018-08-27 11:16:47 +02:00
|
|
|
size_t metadata_allocated;
|
|
|
|
size_t metadata_count;
|
2018-10-06 21:36:03 +02:00
|
|
|
size_t metadata_count_unguarded;
|
2018-09-01 05:10:26 +02:00
|
|
|
} __attribute__((aligned(CACHELINE_SIZE))) size_class_metadata[N_SIZE_CLASSES];
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-10-12 00:12:20 +02:00
|
|
|
#define CLASS_REGION_SIZE (128ULL * 1024 * 1024 * 1024)
|
|
|
|
#define REAL_CLASS_REGION_SIZE (CLASS_REGION_SIZE * 2)
|
|
|
|
static const size_t slab_region_size = REAL_CLASS_REGION_SIZE * N_SIZE_CLASSES;
|
2018-08-21 21:23:22 +02:00
|
|
|
static_assert(PAGE_SIZE == 4096, "bitmap handling will need adjustment for other page sizes");
|
|
|
|
|
2018-09-07 00:41:00 +02:00
|
|
|
static void *get_slab(struct size_class *c, size_t slab_size, struct slab_metadata *metadata) {
|
|
|
|
size_t index = metadata - c->slab_info;
|
|
|
|
return (char *)c->class_region_start + (index * slab_size);
|
|
|
|
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
static size_t get_metadata_max(size_t slab_size) {
|
2018-10-12 00:12:20 +02:00
|
|
|
return CLASS_REGION_SIZE / slab_size;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-09-07 00:41:00 +02:00
|
|
|
// Reserve the next slab_metadata entry for size class `c`, growing the
// committed portion of the metadata array on demand. Returns NULL (with
// errno set to ENOMEM when the class region is exhausted) on failure.
// `non_zero_size` controls whether the slab's pages are made readable and
// writable up front.
static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_size, bool non_zero_size) {
    if (unlikely(c->metadata_count >= c->metadata_allocated)) {
        size_t metadata_max = get_metadata_max(slab_size);
        if (c->metadata_count >= metadata_max) {
            // every slab position in the class region is in use
            errno = ENOMEM;
            return NULL;
        }
        // double the committed metadata, capped at the per-class maximum
        size_t allocate = c->metadata_allocated * 2;
        if (allocate > metadata_max) {
            allocate = metadata_max;
        }
        if (memory_protect_rw(c->slab_info, allocate * sizeof(struct slab_metadata))) {
            return NULL;
        }
        c->metadata_allocated = allocate;
    }

    struct slab_metadata *metadata = c->slab_info + c->metadata_count;
    void *slab = get_slab(c, slab_size, metadata);
    if (non_zero_size && memory_protect_rw(slab, slab_size)) {
        return NULL;
    }
    c->metadata_count++;
    // every GUARD_SLABS_INTERVAL slabs, burn one metadata slot so the
    // corresponding slab is never handed out and acts as a guard
    c->metadata_count_unguarded++;
    if (c->metadata_count_unguarded >= GUARD_SLABS_INTERVAL) {
        c->metadata_count++;
        c->metadata_count_unguarded = 0;
    }
    return metadata;
}
|
|
|
|
|
|
|
|
static void set_slot(struct slab_metadata *metadata, size_t index) {
|
2018-10-07 19:34:52 +02:00
|
|
|
metadata->bitmap[index / 64] |= 1UL << index;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void clear_slot(struct slab_metadata *metadata, size_t index) {
|
2018-10-07 19:34:52 +02:00
|
|
|
metadata->bitmap[index / 64] &= ~(1UL << index);
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool get_slot(struct slab_metadata *metadata, size_t index) {
|
2018-10-07 19:34:52 +02:00
|
|
|
return (metadata->bitmap[index / 64] >> index) & 1UL;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
// Mask with 1s in the bit positions beyond `slots` valid slots: ORing it
// into a bitmap word makes the out-of-range tail look allocated. Zero when
// the whole 64-bit word is in range.
static u64 get_mask(size_t slots) {
    if (slots >= 64) {
        return 0;
    }
    return ~0UL << slots;
}
|
|
|
|
|
2018-08-25 09:02:39 +02:00
|
|
|
static size_t get_free_slot(struct random_state *rng, size_t slots, struct slab_metadata *metadata) {
|
2018-09-07 06:25:02 +02:00
|
|
|
if (SLOT_RANDOMIZE) {
|
|
|
|
// randomize start location for linear search (uniform random choice is too slow)
|
2018-10-07 19:34:52 +02:00
|
|
|
unsigned random_index = get_random_u16_uniform(rng, slots);
|
|
|
|
unsigned first_bitmap = random_index / 64;
|
|
|
|
u64 random_split = ~(~0UL << (random_index - first_bitmap * 64));
|
|
|
|
|
2018-10-15 00:49:48 +02:00
|
|
|
for (unsigned i = first_bitmap; i <= (slots - 1) / 64; i++) {
|
2018-10-07 19:34:52 +02:00
|
|
|
u64 masked = metadata->bitmap[i];
|
|
|
|
if (i == slots / 64) {
|
|
|
|
masked |= get_mask(slots - i * 64);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == first_bitmap) {
|
|
|
|
masked |= random_split;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (masked == ~0UL) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-08-25 09:02:39 +02:00
|
|
|
|
2018-10-07 19:34:52 +02:00
|
|
|
return ffzl(masked) - 1 + i * 64;
|
2018-09-07 06:25:02 +02:00
|
|
|
}
|
2018-10-07 19:34:52 +02:00
|
|
|
}
|
|
|
|
|
2018-10-15 00:49:48 +02:00
|
|
|
for (unsigned i = 0; i <= (slots - 1) / 64; i++) {
|
2018-10-07 19:34:52 +02:00
|
|
|
u64 masked = metadata->bitmap[i];
|
2018-10-15 00:49:48 +02:00
|
|
|
if (i == (slots - 1) / 64) {
|
2018-10-07 19:34:52 +02:00
|
|
|
masked |= get_mask(slots - i * 64);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (masked == ~0UL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ffzl(masked) - 1 + i * 64;
|
2018-08-25 09:02:39 +02:00
|
|
|
}
|
2018-09-07 06:25:02 +02:00
|
|
|
|
2018-10-07 19:34:52 +02:00
|
|
|
fatal_error("no zero bits");
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool has_free_slots(size_t slots, struct slab_metadata *metadata) {
|
2018-10-07 19:34:52 +02:00
|
|
|
if (slots <= 64) {
|
|
|
|
u64 masked = metadata->bitmap[0] | get_mask(slots);
|
|
|
|
return masked != ~0UL;
|
|
|
|
} else if (slots <= 128) {
|
|
|
|
u64 masked = metadata->bitmap[1] | get_mask(slots - 64);
|
|
|
|
return metadata->bitmap[0] != ~0UL || masked != ~0UL;
|
|
|
|
} else if (slots <= 192) {
|
|
|
|
u64 masked = metadata->bitmap[2] | get_mask(slots - 128);
|
|
|
|
return metadata->bitmap[0] != ~0UL || metadata->bitmap[1] != ~0UL || masked != ~0UL;
|
2018-08-25 09:02:39 +02:00
|
|
|
}
|
2018-10-07 19:34:52 +02:00
|
|
|
u64 masked = metadata->bitmap[3] | get_mask(slots - 192);
|
|
|
|
return metadata->bitmap[0] != ~0UL || metadata->bitmap[1] != ~0UL || metadata->bitmap[2] != ~0UL || masked != ~0UL;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_free_slab(struct slab_metadata *metadata) {
|
2018-10-07 19:34:52 +02:00
|
|
|
return !metadata->bitmap[0] && !metadata->bitmap[1] && !metadata->bitmap[2] &&
|
|
|
|
!metadata->bitmap[3];
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-08-27 12:57:44 +02:00
|
|
|
// Map an allocation pointer back to the metadata entry for its slab.
static struct slab_metadata *get_metadata(struct size_class *c, void *p) {
    size_t offset = (char *)p - (char *)c->class_region_start;
    // slab index = offset / slab_size, via the precomputed libdivide divisor
    size_t index = libdivide_u64_do(offset, &c->slab_size_divisor);
    // still caught without this check either as a read access violation or "double free"
    if (index >= c->metadata_allocated) {
        fatal_error("invalid free within a slab yet to be used");
    }
    return c->slab_info + index;
}
|
|
|
|
|
2018-08-24 08:55:53 +02:00
|
|
|
// Address of slot number `slot` within `slab`, for slot size `size`.
static void *slot_pointer(size_t size, void *slab, size_t slot) {
    char *base = slab;
    return base + slot * size;
}
|
|
|
|
|
2018-10-15 00:30:20 +02:00
|
|
|
static void write_after_free_check(const char *p, size_t size) {
|
2018-09-07 06:17:22 +02:00
|
|
|
if (!WRITE_AFTER_FREE_CHECK) {
|
2018-09-07 06:00:32 +02:00
|
|
|
return;
|
2018-09-02 14:36:48 +02:00
|
|
|
}
|
2018-09-07 06:00:32 +02:00
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
for (size_t i = 0; i < size; i += sizeof(u64)) {
|
|
|
|
if (*(u64 *)(p + i)) {
|
2018-09-07 06:17:22 +02:00
|
|
|
fatal_error("detected write after free");
|
2018-09-07 06:00:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-04 20:25:16 +02:00
|
|
|
static const u64 canary_mask = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ?
|
2018-10-03 23:17:20 +02:00
|
|
|
0xffffffffffffff00UL :
|
|
|
|
0x00ffffffffffffffUL;
|
2018-10-03 23:09:57 +02:00
|
|
|
|
2018-09-07 06:00:32 +02:00
|
|
|
static void set_canary(struct slab_metadata *metadata, void *p, size_t size) {
|
|
|
|
memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
|
2018-09-02 14:36:48 +02:00
|
|
|
}
|
|
|
|
|
2018-09-11 20:18:13 +02:00
|
|
|
static inline void *allocate_small(size_t requested_size) {
|
2018-08-21 21:23:22 +02:00
|
|
|
struct size_info info = get_size_info(requested_size);
|
2018-09-11 20:51:36 +02:00
|
|
|
size_t size = info.size ? info.size : 16;
|
2018-08-21 21:23:22 +02:00
|
|
|
struct size_class *c = &size_class_metadata[info.class];
|
|
|
|
size_t slots = size_class_slots[info.class];
|
|
|
|
size_t slab_size = get_slab_size(slots, size);
|
|
|
|
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_lock(&c->lock);
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
if (c->partial_slabs == NULL) {
|
2018-08-30 10:55:39 +02:00
|
|
|
if (c->empty_slabs != NULL) {
|
|
|
|
struct slab_metadata *metadata = c->empty_slabs;
|
|
|
|
c->empty_slabs = c->empty_slabs->next;
|
2018-08-30 12:44:58 +02:00
|
|
|
c->empty_slabs_total -= slab_size;
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-08-30 11:42:45 +02:00
|
|
|
metadata->next = NULL;
|
2018-08-21 21:23:22 +02:00
|
|
|
metadata->prev = NULL;
|
|
|
|
|
|
|
|
c->partial_slabs = metadata;
|
|
|
|
|
|
|
|
void *slab = get_slab(c, slab_size, metadata);
|
2018-08-26 00:02:13 +02:00
|
|
|
size_t slot = get_free_slot(&c->rng, slots, metadata);
|
|
|
|
set_slot(metadata, slot);
|
|
|
|
void *p = slot_pointer(size, slab, slot);
|
2018-09-07 06:00:32 +02:00
|
|
|
if (requested_size) {
|
|
|
|
write_after_free_check(p, size - canary_size);
|
|
|
|
set_canary(metadata, p, size);
|
|
|
|
}
|
2018-08-26 00:02:13 +02:00
|
|
|
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-30 11:18:38 +02:00
|
|
|
return p;
|
2018-09-01 04:42:34 +02:00
|
|
|
} else if (c->free_slabs_head != NULL) {
|
|
|
|
struct slab_metadata *metadata = c->free_slabs_head;
|
2018-09-06 21:07:01 +02:00
|
|
|
metadata->canary_value = get_random_u64(&c->rng);
|
2018-08-30 11:18:38 +02:00
|
|
|
|
|
|
|
void *slab = get_slab(c, slab_size, metadata);
|
2018-09-07 06:00:32 +02:00
|
|
|
if (requested_size && memory_protect_rw(slab, slab_size)) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-30 11:18:38 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-09-01 04:42:34 +02:00
|
|
|
c->free_slabs_head = c->free_slabs_head->next;
|
|
|
|
if (c->free_slabs_head == NULL) {
|
|
|
|
c->free_slabs_tail = NULL;
|
|
|
|
}
|
2018-08-30 11:18:38 +02:00
|
|
|
|
|
|
|
metadata->next = NULL;
|
|
|
|
metadata->prev = NULL;
|
|
|
|
|
|
|
|
c->partial_slabs = metadata;
|
|
|
|
|
|
|
|
size_t slot = get_free_slot(&c->rng, slots, metadata);
|
|
|
|
set_slot(metadata, slot);
|
|
|
|
void *p = slot_pointer(size, slab, slot);
|
2018-09-07 06:00:32 +02:00
|
|
|
if (requested_size) {
|
|
|
|
set_canary(metadata, p, size);
|
|
|
|
}
|
2018-08-30 11:18:38 +02:00
|
|
|
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-26 00:02:13 +02:00
|
|
|
return p;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-09-07 00:41:00 +02:00
|
|
|
struct slab_metadata *metadata = alloc_metadata(c, slab_size, requested_size);
|
|
|
|
if (unlikely(metadata == NULL)) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-21 21:23:22 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
2018-10-03 23:09:57 +02:00
|
|
|
metadata->canary_value = get_random_u64(&c->rng) & canary_mask;
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
c->partial_slabs = metadata;
|
2018-09-07 00:41:00 +02:00
|
|
|
void *slab = get_slab(c, slab_size, metadata);
|
2018-08-26 00:02:13 +02:00
|
|
|
size_t slot = get_free_slot(&c->rng, slots, metadata);
|
|
|
|
set_slot(metadata, slot);
|
|
|
|
void *p = slot_pointer(size, slab, slot);
|
2018-09-07 06:00:32 +02:00
|
|
|
if (requested_size) {
|
|
|
|
set_canary(metadata, p, size);
|
|
|
|
}
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-26 00:02:13 +02:00
|
|
|
return p;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
struct slab_metadata *metadata = c->partial_slabs;
|
2018-08-25 09:02:39 +02:00
|
|
|
size_t slot = get_free_slot(&c->rng, slots, metadata);
|
2018-08-21 21:23:22 +02:00
|
|
|
set_slot(metadata, slot);
|
|
|
|
|
|
|
|
if (!has_free_slots(slots, metadata)) {
|
|
|
|
c->partial_slabs = c->partial_slabs->next;
|
|
|
|
if (c->partial_slabs) {
|
|
|
|
c->partial_slabs->prev = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void *slab = get_slab(c, slab_size, metadata);
|
2018-08-24 08:55:53 +02:00
|
|
|
void *p = slot_pointer(size, slab, slot);
|
2018-09-07 06:00:32 +02:00
|
|
|
if (requested_size) {
|
|
|
|
write_after_free_check(p, size - canary_size);
|
|
|
|
set_canary(metadata, p, size);
|
|
|
|
}
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&c->lock);
|
2018-08-21 21:23:22 +02:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t slab_size_class(void *p) {
|
|
|
|
size_t offset = (char *)p - (char *)ro.slab_region_start;
|
2018-10-12 00:12:20 +02:00
|
|
|
return offset / REAL_CLASS_REGION_SIZE;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static size_t slab_usable_size(void *p) {
|
|
|
|
return size_classes[slab_size_class(p)];
|
|
|
|
}
|
|
|
|
|
2018-09-01 04:42:34 +02:00
|
|
|
// Push a purged slab into the free-slab quarantine: the slab first displaces
// a random quarantine entry, and only the displaced slab (if any) is appended
// to the FIFO free list, delaying and randomizing address reuse.
static void enqueue_free_slab(struct size_class *c, struct slab_metadata *metadata) {
    metadata->next = NULL;

    static_assert(FREE_SLABS_QUARANTINE_RANDOM_SIZE < (u16)-1, "free slabs quarantine too large");
    size_t index = get_random_u16_uniform(&c->rng, FREE_SLABS_QUARANTINE_RANDOM_SIZE);
    struct slab_metadata *substitute = c->free_slabs_quarantine[index];
    c->free_slabs_quarantine[index] = metadata;

    if (substitute == NULL) {
        return;
    }

    // append the displaced slab to the FIFO free list
    if (c->free_slabs_tail != NULL) {
        c->free_slabs_tail->next = substitute;
    } else {
        c->free_slabs_head = substitute;
    }
    c->free_slabs_tail = substitute;
}
|
|
|
|
|
2018-10-15 00:28:01 +02:00
|
|
|
// Free a slab allocation. Validates alignment, double frees, the canary and
// (when provided) the caller's expected size; zeroes the slot; and manages
// the slab's membership in the partial/empty/free lists.
static inline void deallocate_small(void *p, const size_t *expected_size) {
    size_t class = slab_size_class(p);

    struct size_class *c = &size_class_metadata[class];
    size_t size = size_classes[class];
    if (expected_size && size != *expected_size) {
        fatal_error("sized deallocation mismatch (small)");
    }
    // class 0 allocations are spaced 16 bytes apart despite being zero-sized
    bool is_zero_size = size == 0;
    if (is_zero_size) {
        size = 16;
    }
    size_t slots = size_class_slots[class];
    size_t slab_size = get_slab_size(slots, size);

    mutex_lock(&c->lock);

    struct slab_metadata *metadata = get_metadata(c, p);
    void *slab = get_slab(c, slab_size, metadata);
    // slot index = offset within the slab / slot size, via libdivide
    size_t slot = libdivide_u32_do((char *)p - (char *)slab, &c->size_divisor);

    // the pointer must sit exactly on a slot boundary
    if (slot_pointer(size, slab, slot) != p) {
        fatal_error("invalid unaligned free");
    }

    if (!get_slot(metadata, slot)) {
        fatal_error("double free");
    }

    if (!is_zero_size) {
        if (ZERO_ON_FREE) {
            // zero the data portion, preserving the trailing canary
            memset(p, 0, size - canary_size);
        }

        if (canary_size) {
            u64 canary_value;
            memcpy(&canary_value, (char *)p + size - canary_size, canary_size);
            if (unlikely(canary_value != metadata->canary_value)) {
                fatal_error("canary corrupted");
            }
        }
    }

    // a previously-full slab regains a free slot: put it back on the
    // partial list (checked before clear_slot, while the slab still
    // appears full)
    if (!has_free_slots(slots, metadata)) {
        metadata->next = c->partial_slabs;
        metadata->prev = NULL;

        if (c->partial_slabs) {
            c->partial_slabs->prev = metadata;
        }
        c->partial_slabs = metadata;
    }

    clear_slot(metadata, slot);

    if (is_free_slab(metadata)) {
        // last slot freed: unlink from the partial list
        if (metadata->prev) {
            metadata->prev->next = metadata->next;
        } else {
            c->partial_slabs = metadata->next;
        }
        if (metadata->next) {
            metadata->next->prev = metadata->prev;
        }

        metadata->prev = NULL;

        // over the empty-slab cache limit: try to purge the slab's pages
        // and move it to the quarantined free list instead
        if (c->empty_slabs_total + slab_size > max_empty_slabs_total) {
            if (!memory_map_fixed(slab, slab_size)) {
                enqueue_free_slab(c, metadata);
                mutex_unlock(&c->lock);
                return;
            }
            // handle out-of-memory by just putting it into the empty slabs list
        }

        metadata->next = c->empty_slabs;
        c->empty_slabs = metadata;
        c->empty_slabs_total += slab_size;
    }

    mutex_unlock(&c->lock);
}
|
|
|
|
|
|
|
|
struct region_info {
|
|
|
|
void *p;
|
|
|
|
size_t size;
|
2018-08-26 10:42:01 +02:00
|
|
|
size_t guard_size;
|
2018-08-21 21:23:22 +02:00
|
|
|
};
|
|
|
|
|
2018-10-09 20:08:36 +02:00
|
|
|
struct quarantine_info {
|
|
|
|
void *p;
|
|
|
|
size_t size;
|
|
|
|
};
|
|
|
|
|
2018-10-12 00:12:20 +02:00
|
|
|
#define INITIAL_REGION_TABLE_SIZE 256
|
|
|
|
static const size_t max_region_table_size = CLASS_REGION_SIZE / PAGE_SIZE;
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-08-25 06:21:02 +02:00
|
|
|
static struct random_state regions_rng;
|
2018-08-21 21:23:22 +02:00
|
|
|
static struct region_info *regions;
|
2018-10-12 00:12:20 +02:00
|
|
|
static size_t regions_total = INITIAL_REGION_TABLE_SIZE;
|
|
|
|
static size_t regions_free = INITIAL_REGION_TABLE_SIZE;
|
2018-09-07 07:08:51 +02:00
|
|
|
static struct mutex regions_lock = MUTEX_INITIALIZER;
|
2018-10-12 21:03:59 +02:00
|
|
|
static struct quarantine_info regions_quarantine_random[REGION_QUARANTINE_RANDOM_SIZE];
|
|
|
|
static struct quarantine_info regions_quarantine_queue[REGION_QUARANTINE_QUEUE_SIZE];
|
2018-10-08 21:50:31 +02:00
|
|
|
static size_t regions_quarantine_index;
|
|
|
|
|
|
|
|
// Release a large-allocation region. Regions below the skip threshold are
// remapped PROT_NONE in place and cycled through a random-substitution
// quarantine and then a FIFO queue, delaying address reuse; only the entry
// evicted from the queue is actually unmapped.
static void regions_quarantine_deallocate_pages(void *p, size_t size, size_t guard_size) {
    if (size >= REGION_QUARANTINE_SKIP_THRESHOLD) {
        deallocate_pages(p, size, guard_size);
        return;
    }

    // replace the region with a fresh mapping; fall back to plain
    // deallocation if that fails
    if (unlikely(memory_map_fixed(p, size))) {
        deallocate_pages(p, size, guard_size);
        return;
    }

    // quarantine the guard pages on both sides along with the region
    struct quarantine_info a =
        (struct quarantine_info){(char *)p - guard_size, size + guard_size * 2};

    mutex_lock(&regions_lock);

    // stage 1: swap with a random quarantine slot
    size_t index = get_random_u64_uniform(&regions_rng, REGION_QUARANTINE_RANDOM_SIZE);
    struct quarantine_info b = regions_quarantine_random[index];
    regions_quarantine_random[index] = a;
    if (b.p == NULL) {
        mutex_unlock(&regions_lock);
        return;
    }

    // stage 2: rotate the displaced entry through the FIFO queue
    a = regions_quarantine_queue[regions_quarantine_index];
    regions_quarantine_queue[regions_quarantine_index] = b;
    regions_quarantine_index = (regions_quarantine_index + 1) % REGION_QUARANTINE_QUEUE_SIZE;

    mutex_unlock(&regions_lock);

    // finally unmap whatever fell out of the queue, outside the lock
    if (a.p != NULL) {
        memory_unmap(a.p, a.size);
    }
}
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
static size_t hash_page(void *p) {
|
|
|
|
uintptr_t u = (uintptr_t)p >> PAGE_SHIFT;
|
|
|
|
size_t sum = u;
|
|
|
|
sum = (sum << 7) - sum + (u >> 16);
|
|
|
|
sum = (sum << 7) - sum + (u >> 32);
|
|
|
|
sum = (sum << 7) - sum + (u >> 48);
|
|
|
|
return sum;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Double the regions hash table, rehashing into the other of the two
// reserved backing mappings. Returns non-zero on failure (overflow, table at
// maximum size, or the protection change failed).
static int regions_grow(void) {
    if (regions_total > SIZE_MAX / sizeof(struct region_info) / 2) {
        return 1;
    }

    size_t newtotal = regions_total * 2;
    size_t newsize = newtotal * sizeof(struct region_info);
    size_t mask = newtotal - 1;

    if (newtotal > max_region_table_size) {
        return 1;
    }

    // flip to whichever of the two reserved mappings is not currently active
    struct region_info *p = regions == ro.regions[0] ?
        ro.regions[1] : ro.regions[0];

    if (memory_protect_rw(p, newsize)) {
        return 1;
    }

    // rehash every live entry into the new table (open addressing,
    // backward linear probing)
    for (size_t i = 0; i < regions_total; i++) {
        void *q = regions[i].p;
        if (q != NULL) {
            size_t index = hash_page(q) & mask;
            while (p[index].p != NULL) {
                index = (index - 1) & mask;
            }
            p[index] = regions[i];
        }
    }

    // NOTE(review): return value ignored — appears to be best-effort
    // replacement of the old table's pages; confirm failure here is benign
    memory_map_fixed(regions, regions_total * sizeof(struct region_info));
    regions_free = regions_free + regions_total;
    regions_total = newtotal;
    regions = p;
    return 0;
}
|
|
|
|
|
2018-08-26 10:42:01 +02:00
|
|
|
static int regions_insert(void *p, size_t size, size_t guard_size) {
|
2018-08-21 21:23:22 +02:00
|
|
|
if (regions_free * 4 < regions_total) {
|
|
|
|
if (regions_grow()) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t mask = regions_total - 1;
|
|
|
|
size_t index = hash_page(p) & mask;
|
|
|
|
void *q = regions[index].p;
|
|
|
|
while (q != NULL) {
|
|
|
|
index = (index - 1) & mask;
|
|
|
|
q = regions[index].p;
|
|
|
|
}
|
|
|
|
regions[index].p = p;
|
|
|
|
regions[index].size = size;
|
2018-08-26 10:42:01 +02:00
|
|
|
regions[index].guard_size = guard_size;
|
2018-08-21 21:23:22 +02:00
|
|
|
regions_free--;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct region_info *regions_find(void *p) {
|
|
|
|
size_t mask = regions_total - 1;
|
|
|
|
size_t index = hash_page(p) & mask;
|
|
|
|
void *r = regions[index].p;
|
|
|
|
while (r != p && r != NULL) {
|
|
|
|
index = (index - 1) & mask;
|
|
|
|
r = regions[index].p;
|
|
|
|
}
|
|
|
|
return (r == p && r != NULL) ? ®ions[index] : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove a region entry and repair the probe chain: with backward linear
// probing, later entries in the same cluster may have probed past this slot,
// so they must be shifted into the hole or lookups would stop early at the
// gap.
static void regions_delete(struct region_info *region) {
    size_t mask = regions_total - 1;

    regions_free++;

    size_t i = region - regions;
    for (;;) {
        regions[i].p = NULL;
        regions[i].size = 0;
        size_t j = i;
        for (;;) {
            i = (i - 1) & mask;
            if (regions[i].p == NULL) {
                // reached the end of the probe cluster: chain is consistent
                return;
            }
            size_t r = hash_page(regions[i].p) & mask;
            // entries whose home bucket lies cyclically within (j, i] remain
            // reachable without filling the hole at j — skip them
            if ((i <= r && r < j) || (r < j && j < i) || (j < i && i <= r)) {
                continue;
            }
            // move this entry into the hole and continue fixing from its
            // old position
            regions[j] = regions[i];
            break;
        }
    }
}
|
|
|
|
|
2018-09-06 20:17:06 +02:00
|
|
|
static void full_lock(void) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_lock(®ions_lock);
|
2018-08-30 13:13:18 +02:00
|
|
|
for (unsigned class = 0; class < N_SIZE_CLASSES; class++) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_lock(&size_class_metadata[class].lock);
|
2018-08-23 23:15:50 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-06 20:17:06 +02:00
|
|
|
static void full_unlock(void) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(®ions_lock);
|
2018-08-30 13:13:18 +02:00
|
|
|
for (unsigned class = 0; class < N_SIZE_CLASSES; class++) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_unlock(&size_class_metadata[class].lock);
|
2018-08-23 23:15:50 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void post_fork_child(void) {
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_init(®ions_lock);
|
2018-08-25 06:21:02 +02:00
|
|
|
random_state_init(®ions_rng);
|
2018-08-30 13:13:18 +02:00
|
|
|
for (unsigned class = 0; class < N_SIZE_CLASSES; class++) {
|
|
|
|
struct size_class *c = &size_class_metadata[class];
|
2018-09-07 07:08:51 +02:00
|
|
|
mutex_init(&c->lock);
|
2018-08-25 06:21:02 +02:00
|
|
|
random_state_init(&c->rng);
|
2018-08-23 23:15:50 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-03 01:58:57 +02:00
|
|
|
static inline bool is_init(void) {
|
|
|
|
return atomic_load_explicit(&ro.initialized, memory_order_acquire);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Abort if the allocator is used before initialization completed.
static inline void enforce_init(void) {
    if (is_init()) {
        return;
    }
    fatal_error("invalid uninitialized allocator usage");
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// One-time allocator initialization, serialized by a local static mutex so
// concurrent first calls race safely. On success the read-only metadata page
// (ro) is sealed with mprotect and the fork handlers are registered.
COLD static void init_slow_path(void) {
    static struct mutex lock = MUTEX_INITIALIZER;

    mutex_lock(&lock);

    // another thread may have completed initialization while we waited
    if (is_init()) {
        mutex_unlock(&lock);
        return;
    }

    // PAGE_SIZE is a compile-time constant; refuse to run on a kernel
    // configured with a different page size
    if (sysconf(_SC_PAGESIZE) != PAGE_SIZE) {
        fatal_error("page size mismatch");
    }

    random_state_init(&regions_rng);
    // two tables are reserved so the region hash table can be migrated
    // back and forth between them when it grows/shrinks
    for (unsigned i = 0; i < 2; i++) {
        ro.regions[i] = allocate_pages(max_region_table_size, PAGE_SIZE, false);
        if (ro.regions[i] == NULL) {
            fatal_error("failed to reserve memory for regions table");
        }
    }
    regions = ro.regions[0];
    // only the initial portion of the reservation is made usable up front
    if (memory_protect_rw(regions, regions_total * sizeof(struct region_info))) {
        fatal_error("failed to unprotect memory for regions table");
    }

    ro.slab_region_start = memory_map(slab_region_size);
    if (ro.slab_region_start == NULL) {
        fatal_error("failed to allocate slab region");
    }
    ro.slab_region_end = (char *)ro.slab_region_start + slab_region_size;

    for (unsigned class = 0; class < N_SIZE_CLASSES; class++) {
        struct size_class *c = &size_class_metadata[class];

        mutex_init(&c->lock);
        random_state_init(&c->rng);

        // place each class region at a random page-aligned gap inside its
        // oversized reservation, leaving at least one guard page
        size_t bound = (REAL_CLASS_REGION_SIZE - CLASS_REGION_SIZE) / PAGE_SIZE - 1;
        size_t gap = (get_random_u64_uniform(&regions_rng, bound) + 1) * PAGE_SIZE;
        c->class_region_start = (char *)ro.slab_region_start + REAL_CLASS_REGION_SIZE * class + gap;

        size_t size = size_classes[class];
        if (size == 0) {
            // the 0-byte class still needs a non-zero divisor
            size = 16;
        }
        // precomputed divisors make the hot-path divisions cheap (libdivide)
        c->size_divisor = libdivide_u32_gen(size);
        size_t slab_size = get_slab_size(size_class_slots[class], size);
        c->slab_size_divisor = libdivide_u64_gen(slab_size);
        size_t metadata_max = get_metadata_max(slab_size);
        c->slab_info = allocate_pages(metadata_max * sizeof(struct slab_metadata), PAGE_SIZE, false);
        if (c->slab_info == NULL) {
            fatal_error("failed to allocate slab metadata");
        }
        // start with one page of usable metadata; grown on demand later
        c->metadata_allocated = PAGE_SIZE / sizeof(struct slab_metadata);
        if (memory_protect_rw(c->slab_info, c->metadata_allocated * sizeof(struct slab_metadata))) {
            fatal_error("failed to allocate initial slab info");
        }
    }

    // release store pairs with the acquire load in is_init
    atomic_store_explicit(&ro.initialized, true, memory_order_release);

    // seal the global metadata so it cannot be corrupted afterwards
    if (memory_protect_ro(&ro, sizeof(ro))) {
        fatal_error("failed to protect allocator data");
    }

    mutex_unlock(&lock);

    // may allocate, so wait until the allocator is initialized to avoid deadlocking
    if (atfork(full_lock, full_unlock, post_fork_child)) {
        fatal_error("pthread_atfork failed");
    }
}
|
|
|
|
|
2018-08-27 15:23:35 +02:00
|
|
|
// Fast-path initialization check: a single predicted-taken branch once the
// allocator is up; the cold one-time work lives in init_slow_path.
static inline void init(void) {
    if (unlikely(!is_init())) {
        init_slow_path();
    }
}
|
|
|
|
|
2018-09-03 01:41:26 +02:00
|
|
|
// trigger early initialization to set up pthread_atfork and protect state as soon as possible
// (constructor priority 101 runs before ordinary constructors)
COLD __attribute__((constructor(101))) static void trigger_early_init(void) {
    // avoid calling init directly to skip it if this isn't the malloc implementation
    h_free(h_malloc(16));
}
|
|
|
|
|
2018-08-26 10:42:01 +02:00
|
|
|
// Pick a random, page-aligned guard size for a large allocation: between 1 and
// size / PAGE_SIZE / GUARD_SIZE_DIVISOR pages, scaling with allocation size.
// NOTE(review): for sizes below PAGE_SIZE * GUARD_SIZE_DIVISOR the computed
// bound is 0 — presumably get_random_u64_uniform treats a 0 bound as "always
// return 0" so the result is one guard page; confirm against its definition.
static size_t get_guard_size(struct random_state *state, size_t size) {
    return (get_random_u64_uniform(state, size / PAGE_SIZE / GUARD_SIZE_DIVISOR) + 1) * PAGE_SIZE;
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// Central allocation dispatcher: slab classes for small sizes, page-granular
// mappings with random guard regions for everything else. Returns NULL on OOM.
static void *allocate(size_t size) {
    if (size <= max_slab_size_class) {
        return allocate_small(size);
    }

    // regions_rng is guarded by regions_lock; take it just long enough to
    // draw the random guard size, then drop it before the expensive mmap
    mutex_lock(&regions_lock);
    size_t guard_size = get_guard_size(&regions_rng, size);
    mutex_unlock(&regions_lock);

    void *p = allocate_pages(size, guard_size, true);
    if (p == NULL) {
        return NULL;
    }

    // record the region in the hash table so free/usable_size can find it;
    // on table failure the fresh mapping must be returned to the OS
    mutex_lock(&regions_lock);
    if (regions_insert(p, size, guard_size)) {
        mutex_unlock(&regions_lock);
        deallocate_pages(p, size, guard_size);
        return NULL;
    }
    mutex_unlock(&regions_lock);

    return p;
}
|
|
|
|
|
2018-10-15 00:28:01 +02:00
|
|
|
// Free a large (non-slab) allocation. expected_size is non-NULL only for the
// sized-deallocation entry point; a mismatch is treated as memory corruption.
// Aborts via fatal_error on any invalid pointer rather than corrupting state.
static void deallocate_large(void *p, const size_t *expected_size) {
    // a large pointer can only exist after initialization
    enforce_init();

    mutex_lock(&regions_lock);
    struct region_info *region = regions_find(p);
    if (region == NULL) {
        fatal_error("invalid free");
    }
    size_t size = region->size;
    if (expected_size && size != *expected_size) {
        fatal_error("sized deallocation mismatch (large)");
    }
    size_t guard_size = region->guard_size;
    regions_delete(region);
    mutex_unlock(&regions_lock);

    // unmapping goes through the quarantine to delay address reuse
    regions_quarantine_deallocate_pages(p, size, guard_size);
}
|
|
|
|
|
2018-09-02 14:36:48 +02:00
|
|
|
static size_t adjust_size_for_canaries(size_t size) {
|
|
|
|
if (size > 0 && size <= max_slab_size_class) {
|
|
|
|
return size + canary_size;
|
|
|
|
}
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// malloc(3): ensure initialization, reserve canary space for slab sizes,
// then dispatch to the central allocator.
EXPORT void *h_malloc(size_t size) {
    init();
    size = adjust_size_for_canaries(size);
    return allocate(size);
}
|
|
|
|
|
|
|
|
EXPORT void *h_calloc(size_t nmemb, size_t size) {
|
|
|
|
size_t total_size;
|
2018-08-29 04:46:20 +02:00
|
|
|
if (unlikely(__builtin_mul_overflow(nmemb, size, &total_size))) {
|
2018-08-21 21:23:22 +02:00
|
|
|
errno = ENOMEM;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
init();
|
2018-09-02 14:36:48 +02:00
|
|
|
total_size = adjust_size_for_canaries(total_size);
|
2018-09-07 06:33:51 +02:00
|
|
|
if (ZERO_ON_FREE) {
|
|
|
|
return allocate(total_size);
|
|
|
|
}
|
|
|
|
void *p = allocate(total_size);
|
|
|
|
if (unlikely(p == NULL)) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-09-07 22:32:51 +02:00
|
|
|
if (size && size <= max_slab_size_class) {
|
2018-09-07 06:33:51 +02:00
|
|
|
memset(p, 0, total_size - canary_size);
|
|
|
|
}
|
|
|
|
return p;
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-10-08 23:09:57 +02:00
|
|
|
// large reallocations at least this big are moved with mremap instead of
// allocate + memcpy + free (see the mremap path in h_realloc)
#define MREMAP_MOVE_THRESHOLD (32 * 1024 * 1024)

// the mremap path tears down the old region without quarantining it, which is
// only consistent because the quarantine already skips regions of this size
static_assert(MREMAP_MOVE_THRESHOLD >= REGION_QUARANTINE_SKIP_THRESHOLD,
              "mremap move threshold must be above region quarantine limit");
|
2018-08-28 15:49:18 +02:00
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// realloc(3). Fast paths: same slab size class, same page-rounded large size,
// in-place shrink/grow of large regions, and an mremap move for huge copies.
// Falls through to allocate + copy + free when nothing cheaper applies.
EXPORT void *h_realloc(void *old, size_t size) {
    if (old == NULL) {
        // realloc(NULL, n) is malloc(n)
        init();
        size = adjust_size_for_canaries(size);
        return allocate(size);
    }

    size = adjust_size_for_canaries(size);

    size_t old_size;
    if (old >= ro.slab_region_start && old < ro.slab_region_end) {
        old_size = slab_usable_size(old);
        // same size class: nothing to do
        if (size <= max_slab_size_class && get_size_info(size).size == old_size) {
            return old;
        }
    } else {
        // a large pointer implies the allocator is initialized
        enforce_init();

        mutex_lock(&regions_lock);
        struct region_info *region = regions_find(old);
        if (region == NULL) {
            fatal_error("invalid realloc");
        }
        old_size = region->size;
        size_t old_guard_size = region->guard_size;
        if (PAGE_CEILING(old_size) == PAGE_CEILING(size)) {
            // same page footprint: just record the new nominal size
            region->size = size;
            mutex_unlock(&regions_lock);
            return old;
        }
        mutex_unlock(&regions_lock);

        size_t old_rounded_size = PAGE_CEILING(old_size);
        size_t rounded_size = PAGE_CEILING(size);

        // in-place resizing only applies while the result stays large
        if (size > max_slab_size_class) {
            // in-place shrink
            if (size < old_size) {
                // turn the now-unused tail pages into the new trailing guard,
                // then release the pages beyond it (via the quarantine)
                void *new_end = (char *)old + rounded_size;
                if (memory_map_fixed(new_end, old_guard_size)) {
                    return NULL;
                }
                void *new_guard_end = (char *)new_end + old_guard_size;
                regions_quarantine_deallocate_pages(new_guard_end, old_rounded_size - rounded_size, 0);

                // regions_lock was dropped above, so look the entry up again
                mutex_lock(&regions_lock);
                struct region_info *region = regions_find(old);
                if (region == NULL) {
                    fatal_error("invalid realloc");
                }
                region->size = size;
                mutex_unlock(&regions_lock);

                return old;
            }

            // in-place growth
            void *guard_end = (char *)old + old_rounded_size + old_guard_size;
            size_t extra = rounded_size - old_rounded_size;
            // try to stretch the trailing guard mapping by `extra` bytes; on
            // success the first `extra` bytes of it become usable memory
            if (!memory_remap((char *)old + old_rounded_size, old_guard_size, old_guard_size + extra)) {
                if (memory_protect_rw((char *)old + old_rounded_size, extra)) {
                    // couldn't make the new pages writable: shrink the
                    // stretched guard back and fall through to move/copy
                    memory_unmap(guard_end, extra);
                } else {
                    mutex_lock(&regions_lock);
                    struct region_info *region = regions_find(old);
                    if (region == NULL) {
                        fatal_error("invalid realloc");
                    }
                    region->size = size;
                    mutex_unlock(&regions_lock);

                    return old;
                }
            }

            size_t copy_size = size < old_size ? size : old_size;
            if (copy_size >= MREMAP_MOVE_THRESHOLD) {
                // huge copy: reserve a destination, then move the pages with
                // mremap instead of copying them
                void *new = allocate(size);
                if (new == NULL) {
                    return NULL;
                }

                mutex_lock(&regions_lock);
                struct region_info *region = regions_find(old);
                if (region == NULL) {
                    fatal_error("invalid realloc");
                }
                regions_delete(region);
                mutex_unlock(&regions_lock);

                if (memory_remap_fixed(old, old_size, new, size)) {
                    // mremap failed: fall back to a plain copy + unmap
                    memcpy(new, old, copy_size);
                    deallocate_pages(old, old_size, old_guard_size);
                } else {
                    // pages moved; only the old guard mappings remain
                    memory_unmap((char *)old - old_guard_size, old_guard_size);
                    memory_unmap((char *)old + PAGE_CEILING(old_size), old_guard_size);
                }
                return new;
            }
        }
    }

    // generic path: new allocation, copy the overlapping bytes, free the old
    void *new = allocate(size);
    if (new == NULL) {
        return NULL;
    }
    size_t copy_size = size < old_size ? size : old_size;
    if (size > 0 && size <= max_slab_size_class) {
        // don't copy the old canary into the new slab allocation
        copy_size -= canary_size;
    }
    memcpy(new, old, copy_size);
    if (old_size <= max_slab_size_class) {
        deallocate_small(old, NULL);
    } else {
        deallocate_large(old, NULL);
    }
    return new;
}
|
|
|
|
|
|
|
|
// Shared implementation for the aligned-allocation entry points. Returns 0 on
// success (result in *memptr), EINVAL for a non-power-of-two or too-small
// alignment, ENOMEM on allocation failure. *memptr is untouched on failure.
static int alloc_aligned(void **memptr, size_t alignment, size_t size, size_t min_alignment) {
    // power-of-two check: a power of two shares no bits with itself minus one
    if ((alignment - 1) & alignment || alignment < min_alignment) {
        return EINVAL;
    }

    if (alignment <= PAGE_SIZE) {
        // slab classes only guarantee min_align; bump to a class whose
        // natural alignment satisfies the request
        if (size <= max_slab_size_class && alignment > min_align) {
            size = get_size_info_align(size, alignment).size;
        }

        void *p = allocate(size);
        if (p == NULL) {
            return ENOMEM;
        }
        *memptr = p;
        return 0;
    }

    // alignment beyond a page: dedicated aligned mapping with random guards
    // (regions_rng is guarded by regions_lock)
    mutex_lock(&regions_lock);
    size_t guard_size = get_guard_size(&regions_rng, size);
    mutex_unlock(&regions_lock);

    void *p = allocate_pages_aligned(size, alignment, guard_size);
    if (p == NULL) {
        return ENOMEM;
    }

    mutex_lock(&regions_lock);
    if (regions_insert(p, size, guard_size)) {
        mutex_unlock(&regions_lock);
        deallocate_pages(p, size, guard_size);
        return ENOMEM;
    }
    mutex_unlock(&regions_lock);

    *memptr = p;
    return 0;
}
|
|
|
|
|
|
|
|
// Pointer-returning wrapper around alloc_aligned for the entry points that
// report failure via errno + NULL instead of an error code.
static void *alloc_aligned_simple(size_t alignment, size_t size) {
    void *result = NULL;
    int err = alloc_aligned(&result, alignment, size, 1);
    if (err == 0) {
        return result;
    }
    errno = err;
    return NULL;
}
|
|
|
|
|
|
|
|
// posix_memalign(3): POSIX requires alignment to be a multiple of
// sizeof(void *), hence the min_alignment argument; errors are returned,
// not stored in errno.
EXPORT int h_posix_memalign(void **memptr, size_t alignment, size_t size) {
    init();
    size = adjust_size_for_canaries(size);
    return alloc_aligned(memptr, alignment, size, sizeof(void *));
}
|
|
|
|
|
|
|
|
// aligned_alloc(3). NOTE(review): C11 requires size to be a multiple of
// alignment; that is not enforced here — presumably intentional leniency
// matching common libc behavior, but worth confirming against the project's
// compatibility goals.
EXPORT void *h_aligned_alloc(size_t alignment, size_t size) {
    init();
    size = adjust_size_for_canaries(size);
    return alloc_aligned_simple(alignment, size);
}
|
|
|
|
|
2018-09-06 22:30:22 +02:00
|
|
|
// legacy memalign(3) is identical to aligned_alloc here, so alias it
EXPORT void *h_memalign(size_t alignment, size_t size) ALIAS(h_aligned_alloc);
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
// valloc(3): page-aligned allocation of the requested size.
EXPORT void *h_valloc(size_t size) {
    init();
    size = adjust_size_for_canaries(size);
    return alloc_aligned_simple(PAGE_SIZE, size);
}
|
|
|
|
|
|
|
|
// pvalloc(3): like valloc but the size is rounded up to a whole page first.
// PAGE_CEILING wraps to 0 on overflow near SIZE_MAX, which the zero check
// turns into ENOMEM. NOTE(review): pvalloc(0) also hits that check and fails
// rather than returning one page — confirm this matches the intended contract.
EXPORT void *h_pvalloc(size_t size) {
    size = PAGE_CEILING(size);
    if (!size) {
        errno = ENOMEM;
        return NULL;
    }
    init();
    size = adjust_size_for_canaries(size);
    return alloc_aligned_simple(PAGE_SIZE, size);
}
|
|
|
|
|
|
|
|
EXPORT void h_free(void *p) {
|
|
|
|
if (p == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-09-11 20:13:18 +02:00
|
|
|
if (p >= ro.slab_region_start && p < ro.slab_region_end) {
|
2018-09-18 23:28:52 +02:00
|
|
|
deallocate_small(p, NULL);
|
2018-09-11 20:13:18 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-09-18 23:28:52 +02:00
|
|
|
deallocate_large(p, NULL);
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
|
|
|
|
2018-08-29 21:06:49 +02:00
|
|
|
// obsolete SVID cfree() is identical to free()
EXPORT void h_cfree(void *ptr) ALIAS(h_free);
|
2018-08-21 21:23:22 +02:00
|
|
|
|
2018-09-18 23:28:52 +02:00
|
|
|
// Sized deallocation: like free, but the caller-supplied size is validated
// against the allocation's recorded size and a mismatch aborts.
EXPORT void h_free_sized(void *p, size_t expected_size) {
    if (p == NULL) {
        return;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        // normalize the caller's size the same way allocation did (canary
        // adjustment, then size-class rounding) before comparing
        expected_size = get_size_info(adjust_size_for_canaries(expected_size)).size;
        deallocate_small(p, &expected_size);
        return;
    }

    deallocate_large(p, &expected_size);
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// malloc_usable_size(3): usable bytes for the allocation backing p, excluding
// the slab canary; 0 for NULL. An unknown non-NULL pointer aborts.
EXPORT size_t h_malloc_usable_size(void *p) {
    if (p == NULL) {
        return 0;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        size_t size = slab_usable_size(p);
        return size ? size - canary_size : 0;
    }

    enforce_init();

    mutex_lock(&regions_lock);
    struct region_info *region = regions_find(p);
    // fix: this previously tested `p == NULL`, which is always false at this
    // point (p was checked on entry), so an invalid pointer dereferenced a
    // NULL region below instead of aborting cleanly
    if (region == NULL) {
        fatal_error("invalid malloc_usable_size");
    }
    size_t size = region->size;
    mutex_unlock(&regions_lock);

    return size;
}
|
|
|
|
|
2018-08-29 19:43:35 +02:00
|
|
|
// malloc_object_size extension: bytes usable at p, 0 for NULL or before
// initialization, SIZE_MAX when the pointer is not tracked by this allocator.
EXPORT size_t h_malloc_object_size(void *p) {
    if (p == NULL) {
        return 0;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        size_t size = slab_usable_size(p);
        return size ? size - canary_size : 0;
    }

    if (unlikely(!is_init())) {
        return 0;
    }

    mutex_lock(&regions_lock);
    struct region_info *region = regions_find(p);
    // fix: the sentinel test previously checked `p == NULL`, which is always
    // false here (p was checked on entry), so a failed lookup dereferenced a
    // NULL region; the lookup result is what must be tested
    size_t size = region == NULL ? SIZE_MAX : region->size;
    mutex_unlock(&regions_lock);

    return size;
}
|
|
|
|
|
|
|
|
// Fast variant of malloc_object_size: answers precisely for slab pointers
// (cheap lookup) but returns SIZE_MAX for large allocations rather than
// taking the region-table lock.
EXPORT size_t h_malloc_object_size_fast(void *p) {
    if (p == NULL) {
        return 0;
    }

    if (p >= ro.slab_region_start && p < ro.slab_region_end) {
        size_t size = slab_usable_size(p);
        return size ? size - canary_size : 0;
    }

    if (unlikely(!is_init())) {
        return 0;
    }

    // unknown/large pointer: "no bound known" without a table lookup
    return SIZE_MAX;
}
|
|
|
|
|
2018-08-21 21:23:22 +02:00
|
|
|
// mallopt(3): no tunables are exposed; report failure (0) for every request.
EXPORT int h_mallopt(UNUSED int param, UNUSED int value) {
    return 0;
}
|
|
|
|
|
2018-08-30 15:29:15 +02:00
|
|
|
// malloc_trim(3): return the memory of fully-empty slabs to the OS by
// replacing their mappings. Returns 1 if anything was released, else 0.
// The pad argument is ignored.
EXPORT int h_malloc_trim(UNUSED size_t pad) {
    if (unlikely(!is_init())) {
        return 0;
    }

    bool is_trimmed = false;

    // skip zero byte size class since there's nothing to change
    for (unsigned class = 1; class < N_SIZE_CLASSES; class++) {
        struct size_class *c = &size_class_metadata[class];
        size_t slab_size = get_slab_size(size_class_slots[class], size_classes[class]);

        mutex_lock(&c->lock);
        struct slab_metadata *iterator = c->empty_slabs;
        while (iterator) {
            void *slab = get_slab(c, slab_size, iterator);
            // replace the slab's pages with a fresh mapping; on failure stop
            // trimming this class and keep the remaining list intact
            if (memory_map_fixed(slab, slab_size)) {
                break;
            }

            // unlink from the empty list and move to the free-slab queue
            struct slab_metadata *trimmed = iterator;
            iterator = iterator->next;
            c->empty_slabs_total -= slab_size;

            enqueue_free_slab(c, trimmed);

            is_trimmed = true;
        }
        // iterator is the first untrimmed entry (or NULL if all were trimmed)
        c->empty_slabs = iterator;
        mutex_unlock(&c->lock);
    }

    return is_trimmed;
}
|
|
|
|
|
|
|
|
// malloc_stats(3): intentionally a no-op (no statistics reporting)
EXPORT void h_malloc_stats(void) {}
|
|
|
|
|
2018-09-06 20:35:08 +02:00
|
|
|
#if defined(__GLIBC__) || defined(__ANDROID__)
|
2018-08-21 21:23:22 +02:00
|
|
|
EXPORT struct mallinfo h_mallinfo(void) {
|
2018-09-02 11:08:45 +02:00
|
|
|
return (struct mallinfo){0};
|
2018-08-21 21:23:22 +02:00
|
|
|
}
|
2018-09-06 20:35:08 +02:00
|
|
|
#endif
|
2018-08-21 21:23:22 +02:00
|
|
|
|
|
|
|
// malloc_info(3): not implemented; fail with ENOSYS per its error convention.
EXPORT int h_malloc_info(UNUSED int options, UNUSED FILE *fp) {
    errno = ENOSYS;
    return -1;
}
|
|
|
|
|
|
|
|
// obsolete glibc malloc_get_state: unsupported, NULL signals failure
COLD EXPORT void *h_malloc_get_state(void) {
    return NULL;
}
|
|
|
|
|
|
|
|
// obsolete glibc malloc_set_state: unsupported; -2 is glibc's code for an
// unusable state argument
COLD EXPORT int h_malloc_set_state(UNUSED void *state) {
    return -2;
}
|
2018-09-02 11:08:45 +02:00
|
|
|
|
|
|
|
#ifdef __ANDROID__
|
|
|
|
EXPORT size_t __mallinfo_narenas(void) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT size_t __mallinfo_nbins(void) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT struct mallinfo __mallinfo_arena_info(UNUSED size_t arena) {
|
|
|
|
return (struct mallinfo){0};
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT struct mallinfo __mallinfo_bin_info(UNUSED size_t arena, UNUSED size_t bin) {
|
|
|
|
return (struct mallinfo){0};
|
|
|
|
}
|
|
|
|
|
|
|
|
COLD EXPORT int h_iterate(UNUSED uintptr_t base, UNUSED size_t size,
|
|
|
|
UNUSED void (*callback)(uintptr_t ptr, size_t size, void *arg),
|
|
|
|
UNUSED void *arg) {
|
|
|
|
fatal_error("not implemented");
|
|
|
|
}
|
|
|
|
|
|
|
|
COLD EXPORT void h_malloc_disable(void) {
|
2018-09-06 20:17:06 +02:00
|
|
|
full_lock();
|
2018-09-02 11:08:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
COLD EXPORT void h_malloc_enable(void) {
|
2018-09-06 20:17:06 +02:00
|
|
|
full_unlock();
|
2018-09-02 11:08:45 +02:00
|
|
|
}
|
|
|
|
#endif
|