Mirror of https://github.com/GrapheneOS/hardened_malloc.git, synced 2025-04-19 22:10:19 +02:00
Compare commits: 95c8641aee ... 7673582e23 (3 commits)

Commits:
7673582e23
7481c8857f
96836f463b
4 changed files with 25 additions and 1 deletion
LICENSE (2 changed lines)

@@ -1,4 +1,4 @@
-Copyright © 2018-2024 GrapheneOS
+Copyright © 2018-2025 GrapheneOS
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
h_malloc.c (5 changed lines)
@@ -1295,7 +1295,12 @@ COLD static void init_slow_path(void) {
 
     atomic_store_explicit(&ro.slab_region_end, slab_region_end, memory_order_release);
 
+#if defined(__ANDROID__) && defined(HAS_ARM_MTE)
+    /* Do not seal to support disabling memory tagging */
     if (unlikely(memory_protect_ro(&ro, sizeof(ro)))) {
+#else
+    if (unlikely(memory_protect_seal(&ro, sizeof(ro)))) {
+#endif
         fatal_error("failed to protect allocator data");
     }
     memory_set_name(&ro, sizeof(ro), "malloc read-only after init");
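Note: with this change, init_slow_path() seals the allocator's read-only state (ro) via memory_protect_seal() once initialization finishes, instead of only mapping it read-only. The Android + ARM MTE build keeps the memory_protect_ro() path; the added comment indicates sealing is skipped so that memory tagging can still be disabled later, which appears to require changing this mapping again. Below is a minimal standalone sketch (not hardened_malloc code) of what sealing provides: after mseal(), later mprotect() calls on the region fail with EPERM. It assumes a Linux 6.10+ kernel and uses the raw __NR_mseal syscall number since a libc wrapper may not be available.

/* Minimal sketch, not hardened_malloc code: demonstrates that a sealed
 * mapping rejects later mprotect() calls. Assumes Linux 6.10+ and uses the
 * raw __NR_mseal syscall number since libc may not expose a wrapper. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
    size_t size = (size_t)sysconf(_SC_PAGESIZE);
    void *p = mmap(NULL, size, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
#ifdef __NR_mseal
    if (syscall(__NR_mseal, p, size, 0) != 0) {
        perror("mseal"); /* e.g. ENOSYS on kernels older than 6.10 */
        return 1;
    }
    /* Sealed: changing the protection is no longer permitted. */
    if (mprotect(p, size, PROT_READ | PROT_WRITE) != 0) {
        printf("mprotect after mseal failed as expected: %s\n", strerror(errno));
    }
#else
    puts("__NR_mseal is not defined by these kernel headers");
#endif
    return 0;
}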
memory.c (18 changed lines)

@@ -1,6 +1,8 @@
 #include <errno.h>
+#include <unistd.h>
 
 #include <sys/mman.h>
+#include <sys/syscall.h>
 
 #ifdef LABEL_MEMORY
 #include <sys/prctl.h>

@@ -91,6 +93,22 @@ bool memory_protect_rw_metadata(void *ptr, size_t size) {
     return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, get_metadata_key());
 }
 
+COLD bool memory_protect_seal(void *ptr, size_t size) {
+#if defined(__linux__) && defined(__NR_mseal)
+    /* supported since Linux 6.10 */
+    int ret = syscall(__NR_mseal, ptr, size, 0);
+    if (ret == 0)
+        return false;
+    if (unlikely(errno == ENOMEM))
+        return true;
+    if (errno == ENOSYS)
+        return memory_protect_ro(ptr, size);
+    fatal_error("non-ENOMEM and non-ENOSYS mseal failure");
+#else
+    return memory_protect_ro(ptr, size);
+#endif
+}
+
 #ifdef HAVE_COMPATIBLE_MREMAP
 bool memory_remap(void *old, size_t old_size, size_t new_size) {
     void *ptr = mremap(old, old_size, new_size, 0);
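The new memory_protect_seal() returns false when mseal() succeeds, true only for ENOMEM, falls back to memory_protect_ro() when the kernel predates mseal() (ENOSYS), and treats any other error as fatal. As a rough illustration of the same fallback pattern outside the allocator, here is a hypothetical helper (the name seal_or_protect_ro and its 0/-1 return convention are not from the project) that tries mseal() and settles for read-only protection on older kernels.

/* Hypothetical helper, not part of hardened_malloc: try to seal a region,
 * and on kernels without mseal() (ENOSYS) fall back to making it read-only.
 * Returns 0 on success, -1 on failure. */
#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int seal_or_protect_ro(void *ptr, size_t size) {
#if defined(__linux__) && defined(__NR_mseal)
    if (syscall(__NR_mseal, ptr, size, 0) == 0) {
        return 0; /* sealed: protection and mapping changes are now blocked */
    }
    if (errno != ENOSYS) {
        return -1; /* mseal() exists but failed, e.g. ENOMEM */
    }
#endif
    /* No mseal() available: plain read-only protection as a fallback. */
    return mprotect(ptr, size, PROT_READ);
}

A caller would typically invoke this right after populating a long-lived configuration structure and treat a non-zero return as fatal, much as init_slow_path() above does with memory_protect_seal().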
memory.h (1 changed line)

@@ -22,6 +22,7 @@ bool memory_unmap(void *ptr, size_t size);
 bool memory_protect_ro(void *ptr, size_t size);
 bool memory_protect_rw(void *ptr, size_t size);
 bool memory_protect_rw_metadata(void *ptr, size_t size);
+bool memory_protect_seal(void *ptr, size_t size);
 #ifdef HAVE_COMPATIBLE_MREMAP
 bool memory_remap(void *old, size_t old_size, size_t new_size);
 bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);