mte: move is_memtag_enabled to read-only allocator data

pull/226/head
Dmitry Muhomor 2023-10-28 22:55:34 +03:00 committed by Daniel Micay
parent 576328b1b4
commit 01a199e19e
2 changed files with 23 additions and 17 deletions

h_malloc.c

@@ -67,10 +67,6 @@ static atomic_uint thread_arena_counter = 0;
 static const unsigned thread_arena = 0;
 #endif
 
-#ifdef MEMTAG
-bool __is_memtag_enabled = true;
-#endif
-
 static union {
     struct {
         void *slab_region_start;
@@ -80,6 +76,9 @@ static union {
         struct region_metadata *regions[2];
 #ifdef USE_PKEY
         int metadata_pkey;
 #endif
+#ifdef MEMTAG
+        bool is_memtag_disabled;
+#endif
     };
     char padding[PAGE_SIZE];
@@ -89,6 +88,12 @@ static inline void *get_slab_region_end(void) {
     return atomic_load_explicit(&ro.slab_region_end, memory_order_acquire);
 }
 
+#ifdef MEMTAG
+static inline bool is_memtag_enabled(void) {
+    return !ro.is_memtag_disabled;
+}
+#endif
+
 #define SLAB_METADATA_COUNT
 
 struct slab_metadata {
@@ -2152,7 +2157,20 @@ COLD EXPORT int h_malloc_set_state(UNUSED void *state) {
 #ifdef __ANDROID__
 COLD EXPORT void h_malloc_disable_memory_tagging(void) {
 #ifdef HAS_ARM_MTE
-    __is_memtag_enabled = false;
+    if (!ro.is_memtag_disabled) {
+        if (is_init()) {
+            if (unlikely(memory_protect_rw(&ro, sizeof(ro)))) {
+                fatal_error("failed to unprotect allocator data");
+            }
+            ro.is_memtag_disabled = true;
+            if (unlikely(memory_protect_ro(&ro, sizeof(ro)))) {
+                fatal_error("failed to protect allocator data");
+            }
+        } else {
+            // bionic calls this function very early in some cases
+            ro.is_memtag_disabled = true;
+        }
+    }
 #endif
 }
 #endif
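
This change relies on hardened_malloc's read-only allocator data pattern: global state lives in a union padded to PAGE_SIZE and aligned to a page boundary so it can be sealed against writes after initialization, and the flag is stored inverted (is_memtag_disabled) so the zero-initialized default keeps tagging enabled without an early write to the protected page. A minimal, self-contained sketch of the pattern, assuming memory_protect_rw/memory_protect_ro are thin mprotect() wrappers as their names suggest (the real helpers live elsewhere in the codebase):

// Sketch of the read-only allocator data pattern (assumption: the
// memory_protect_* helpers wrap mprotect(); not the real implementations).
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

#define PAGE_SIZE 4096 // assumes 4K pages; mprotect needs page alignment

static union {
    struct {
        bool is_memtag_disabled;
    };
    char padding[PAGE_SIZE];
} ro __attribute__((aligned(PAGE_SIZE)));

static int memory_protect_rw(void *ptr, size_t size) {
    return mprotect(ptr, size, PROT_READ | PROT_WRITE);
}

static int memory_protect_ro(void *ptr, size_t size) {
    return mprotect(ptr, size, PROT_READ);
}

int main(void) {
    // After init, the data is sealed: reads work, stray writes fault.
    if (memory_protect_ro(&ro, sizeof(ro))) {
        perror("memory_protect_ro");
        return 1;
    }
    // A deliberate update briefly reopens the window, writes, reseals.
    if (memory_protect_rw(&ro, sizeof(ro))) {
        perror("memory_protect_rw");
        return 1;
    }
    ro.is_memtag_disabled = true;
    if (memory_protect_ro(&ro, sizeof(ro))) {
        perror("memory_protect_ro");
        return 1;
    }
    printf("memtag enabled: %s\n", ro.is_memtag_disabled ? "no" : "yes");
    return 0;
}

The writable window is kept as short as possible; any write to ro outside that window faults instead of silently flipping allocator state, which is why the early-init branch in the hunk above can skip the unprotect/protect pair.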

memtag.h

@@ -10,18 +10,6 @@
 #define TAG_WIDTH 4
 #endif
 
-#ifdef MEMTAG
-extern bool __is_memtag_enabled;
-#endif
-
-static inline bool is_memtag_enabled(void) {
-#ifdef MEMTAG
-    return __is_memtag_enabled;
-#else
-    return false;
-#endif
-}
-
 static inline void *untag_pointer(void *ptr) {
 #ifdef HAS_ARM_MTE
     const uintptr_t mask = UINTPTR_MAX >> 8;
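
For reference, a usage sketch of the two helpers with an illustrative driver that is not part of this diff; ro.is_memtag_disabled is stood in by a plain static, and a 64-bit pointer layout is assumed for the forged tag:

// Illustrative driver (not from this diff); the helpers mirror the
// hunks above, minus the #ifdef plumbing.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_memtag_disabled; // stand-in for ro.is_memtag_disabled

static inline bool is_memtag_enabled(void) {
    return !is_memtag_disabled;
}

static inline void *untag_pointer(void *ptr) {
    const uintptr_t mask = UINTPTR_MAX >> 8; // clears the top (tag) byte
    return (void *) ((uintptr_t) ptr & mask);
}

int main(void) {
    int x = 0;
    // Forge a top-byte tag for demonstration (assumes 64-bit pointers);
    // the tagged pointer is only compared, never dereferenced.
    void *tagged = (void *) ((uintptr_t) &x | ((uintptr_t) 0xb << 56));
    printf("memtag enabled: %d\n", is_memtag_enabled());
    printf("untagged matches: %d\n", untag_pointer(tagged) == (void *) &x);
    return 0;
}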