Compare commits

No commits in common. "main" and "2024091700" have entirely different histories.

13 changed files with 63 additions and 125 deletions

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
version: [12, 13, 14]
version: [12]
steps:
- uses: actions/checkout@v4
- name: Setting up gcc version
@@ -24,11 +24,9 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
version: [14, 15, 16, 17, 18]
version: [14, 15]
steps:
- uses: actions/checkout@v4
- name: Install dependencies
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends clang-14 clang-15
- name: Setting up clang version
run: |
sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{ matrix.version }} 100

@@ -5,6 +5,8 @@ common_cflags = [
"-fPIC",
"-fvisibility=hidden",
//"-fno-plt",
"-Wall",
"-Wextra",
"-Wcast-align",
"-Wcast-qual",
"-Wwrite-strings",

@@ -1,4 +1,4 @@
Copyright © 2018-2025 GrapheneOS
Copyright © 2018-2024 GrapheneOS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -83,7 +83,7 @@ there will be custom integration offering better performance in the future
along with other hardening for the C standard library implementation.
For Android, only the current generation, actively developed maintenance branch of the Android
Open Source Project will be supported, which currently means `android15-release`.
Open Source Project will be supported, which currently means `android13-qpr2-release`.
## Testing
@@ -731,7 +731,7 @@ Random tags are set for all slab allocations when allocated, with 4 excluded val
3. the current (or previous) tag used for the slot to the left
4. the current (or previous) tag used for the slot to the right
When a slab allocation is freed, the reserved `0` tag is set for the slot.
When a slab allocation is freed, the reserved `0` tag is set for the slot.
Slab allocation slots are cleared before reuse when memory tagging is enabled.
This ensures the following properties:
@@ -740,7 +740,7 @@ This ensures the following properties:
- Use-after-free are deterministically detected until the freed slot goes through
both the random and FIFO quarantines, gets allocated again, goes through both
quarantines again and then finally gets allocated again for a 2nd time.
- Since the default `0` tag is reserved, untagged pointers can't access slab
- Since the default `0` tag is reserved, untagged pointers can't access slab
allocations and vice versa.
Slab allocations are done in a statically reserved region for each size class
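To make the tag-selection rule in the hunk above concrete, here is a minimal sketch in plain C. It is not the allocator's actual implementation, which applies these exclusions through its MTE helpers (such as the `arm_mte_tag_and_clear_mem` call visible in the h_malloc.c hunks below); the `random_u8` helper is a hypothetical stand-in for the allocator's random byte source.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RESERVED_TAG 0

/* Hypothetical stand-in for the allocator's random byte source. */
static uint8_t random_u8(void) {
    return (uint8_t)rand();
}

/* Reject random 4-bit tags until one avoids the reserved tag and the three
   excluded values described above: the previous tag of this slot and the
   current (or previous) tags of the slots to the left and right. */
static uint8_t pick_slot_tag(uint8_t prev_tag, uint8_t left_tag, uint8_t right_tag) {
    for (;;) {
        uint8_t tag = random_u8() & 0xf;
        if (tag != RESERVED_TAG && tag != prev_tag &&
            tag != left_tag && tag != right_tag) {
            return tag;
        }
    }
}

int main(void) {
    srand(1);
    printf("chosen tag: %u\n", (unsigned)pick_slot_tag(3, 7, 9));
    return 0;
}
```

Excluding the previous tag and the two neighbouring tags is what makes the adjacent-overflow and immediate use-after-free detection described in the properties list above deterministic rather than merely probabilistic.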

@@ -346,6 +346,6 @@ int main(int argc, char **argv) {
test_fn();
do_context_switch();
return 0;
}

@@ -41,7 +41,7 @@ static const unsigned rounds = 8;
a = PLUS(a, b); d = ROTATE(XOR(d, a), 8); \
c = PLUS(c, d); b = ROTATE(XOR(b, c), 7);
static const char sigma[16] NONSTRING = "expand 32-byte k";
static const char sigma[16] = "expand 32-byte k";
void chacha_keysetup(chacha_ctx *x, const u8 *k) {
x->input[0] = U8TO32_LITTLE(sigma + 0);
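For context on the `NONSTRING` annotation present on only one side of this hunk: `sigma` is a 16-byte array initialized from a 16-character literal, so it has no room for a terminating NUL, and newer GCC releases can warn about such initializers unless the array carries the `nonstring` attribute (wrapped by the `NONSTRING` macro in the util.h hunk at the end of this diff). A standalone sketch of the pattern, assuming a GNU-compatible compiler:

```c
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(nonstring)
#define NONSTRING __attribute__((nonstring))
#else
#define NONSTRING
#endif

#include <stdio.h>
#include <string.h>

/* Exactly 16 bytes, intentionally not NUL-terminated. */
static const char sigma[16] NONSTRING = "expand 32-byte k";

int main(void) {
    /* sigma is not a C string, so copy it with an explicit length. */
    char buf[sizeof(sigma) + 1];
    memcpy(buf, sigma, sizeof(sigma));
    buf[sizeof(sigma)] = '\0';
    puts(buf);
    return 0;
}
```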

@@ -94,24 +94,6 @@ static inline bool is_memtag_enabled(void) {
}
#endif
static void *memory_map_tagged(size_t size) {
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
return memory_map_mte(size);
}
#endif
return memory_map(size);
}
static bool memory_map_fixed_tagged(void *ptr, size_t size) {
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
return memory_map_fixed_mte(ptr, size);
}
#endif
return memory_map_fixed(ptr, size);
}
#define SLAB_METADATA_COUNT
struct slab_metadata {
@@ -488,7 +470,7 @@ static void write_after_free_check(const char *p, size_t size) {
}
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
return;
}
#endif
@@ -523,7 +505,7 @@ static void set_slab_canary_value(UNUSED struct slab_metadata *metadata, UNUSED
static void set_canary(UNUSED const struct slab_metadata *metadata, UNUSED void *p, UNUSED size_t size) {
#if SLAB_CANARY
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
return;
}
#endif
@@ -535,7 +517,7 @@ static void set_canary(UNUSED const struct slab_metadata *metadata, UNUSED void
static void check_canary(UNUSED const struct slab_metadata *metadata, UNUSED const void *p, UNUSED size_t size) {
#if SLAB_CANARY
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
return;
}
#endif
@@ -642,7 +624,7 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
write_after_free_check(p, size - canary_size);
set_canary(metadata, p, size);
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
p = tag_and_clear_slab_slot(metadata, p, slot, size);
}
#endif
@@ -679,7 +661,7 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
if (requested_size) {
set_canary(metadata, p, size);
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
p = tag_and_clear_slab_slot(metadata, p, slot, size);
}
#endif
@@ -706,7 +688,7 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
if (requested_size) {
set_canary(metadata, p, size);
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
p = tag_and_clear_slab_slot(metadata, p, slot, size);
}
#endif
@@ -735,7 +717,7 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
write_after_free_check(p, size - canary_size);
set_canary(metadata, p, size);
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
p = tag_and_clear_slab_slot(metadata, p, slot, size);
}
#endif
@@ -823,7 +805,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
bool skip_zero = false;
#ifdef HAS_ARM_MTE
if (likely51(is_memtag_enabled())) {
if (likely(is_memtag_enabled())) {
arm_mte_tag_and_clear_mem(set_pointer_tag(p, RESERVED_TAG), size);
// metadata->arm_mte_tags is intentionally not updated, see tag_and_clear_slab_slot()
skip_zero = true;
@@ -908,7 +890,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
if (c->empty_slabs_total + slab_size > max_empty_slabs_total) {
int saved_errno = errno;
if (!memory_map_fixed_tagged(slab, slab_size)) {
if (!memory_map_fixed(slab, slab_size)) {
label_slab(slab, slab_size, class);
stats_slab_deallocate(c, slab_size);
enqueue_free_slab(c, metadata);
@@ -1260,7 +1242,15 @@ COLD static void init_slow_path(void) {
if (unlikely(memory_protect_rw_metadata(ra->regions, ra->total * sizeof(struct region_metadata)))) {
fatal_error("failed to unprotect memory for regions table");
}
ro.slab_region_start = memory_map_tagged(slab_region_size);
#ifdef HAS_ARM_MTE
if (likely(is_memtag_enabled())) {
ro.slab_region_start = memory_map_mte(slab_region_size);
} else {
ro.slab_region_start = memory_map(slab_region_size);
}
#else
ro.slab_region_start = memory_map(slab_region_size);
#endif
if (unlikely(ro.slab_region_start == NULL)) {
fatal_error("failed to allocate slab region");
}
@@ -1905,7 +1895,7 @@ EXPORT int h_malloc_trim(UNUSED size_t pad) {
struct slab_metadata *iterator = c->empty_slabs;
while (iterator) {
void *slab = get_slab(c, slab_size, iterator);
if (memory_map_fixed_tagged(slab, slab_size)) {
if (memory_map_fixed(slab, slab_size)) {
break;
}
label_slab(slab, slab_size, class);

@@ -17,8 +17,8 @@
#include "memory.h"
#include "util.h"
static void *memory_map_prot(size_t size, int prot) {
void *p = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
void *memory_map(size_t size) {
void *p = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (unlikely(p == MAP_FAILED)) {
if (errno != ENOMEM) {
fatal_error("non-ENOMEM mmap failure");
@@ -28,19 +28,22 @@ static void *memory_map_prot(size_t size, int prot) {
return p;
}
void *memory_map(size_t size) {
return memory_map_prot(size, PROT_NONE);
}
#ifdef HAS_ARM_MTE
// Note that PROT_MTE can't be cleared via mprotect
void *memory_map_mte(size_t size) {
return memory_map_prot(size, PROT_MTE);
void *p = mmap(NULL, size, PROT_MTE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (unlikely(p == MAP_FAILED)) {
if (errno != ENOMEM) {
fatal_error("non-ENOMEM MTE mmap failure");
}
return NULL;
}
return p;
}
#endif
static bool memory_map_fixed_prot(void *ptr, size_t size, int prot) {
void *p = mmap(ptr, size, prot, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
bool memory_map_fixed(void *ptr, size_t size) {
void *p = mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
bool ret = p == MAP_FAILED;
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM MAP_FIXED mmap failure");
@@ -48,17 +51,6 @@ static bool memory_map_fixed_prot(void *ptr, size_t size, int prot) {
return ret;
}
bool memory_map_fixed(void *ptr, size_t size) {
return memory_map_fixed_prot(ptr, size, PROT_NONE);
}
#ifdef HAS_ARM_MTE
// Note that PROT_MTE can't be cleared via mprotect
bool memory_map_fixed_mte(void *ptr, size_t size) {
return memory_map_fixed_prot(ptr, size, PROT_MTE);
}
#endif
bool memory_unmap(void *ptr, size_t size) {
bool ret = munmap(ptr, size);
if (unlikely(ret) && errno != ENOMEM) {

@@ -15,9 +15,6 @@ void *memory_map(size_t size);
void *memory_map_mte(size_t size);
#endif
bool memory_map_fixed(void *ptr, size_t size);
#ifdef HAS_ARM_MTE
bool memory_map_fixed_mte(void *ptr, size_t size);
#endif
bool memory_unmap(void *ptr, size_t size);
bool memory_protect_ro(void *ptr, size_t size);
bool memory_protect_rw(void *ptr, size_t size);

@@ -1,6 +1,5 @@
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(__GLIBC__) || defined(__ANDROID__)
#include <malloc.h>

@@ -11,11 +11,9 @@
#ifndef LIBDIVIDE_H
#define LIBDIVIDE_H
// *** Version numbers are auto generated - do not edit ***
#define LIBDIVIDE_VERSION "5.2.0"
#define LIBDIVIDE_VERSION "5.1"
#define LIBDIVIDE_VERSION_MAJOR 5
#define LIBDIVIDE_VERSION_MINOR 2
#define LIBDIVIDE_VERSION_PATCH 0
#define LIBDIVIDE_VERSION_MINOR 1
#include <stdint.h>
@@ -36,15 +34,8 @@
#include <arm_neon.h>
#endif
// Clang-cl prior to Visual Studio 2022 doesn't include __umulh/__mulh intrinsics
#if defined(_MSC_VER) && defined(LIBDIVIDE_X86_64) && (!defined(__clang__) || _MSC_VER>1930)
#define LIBDIVIDE_X64_INTRINSICS
#endif
#if defined(_MSC_VER)
#if defined(LIBDIVIDE_X64_INTRINSICS)
#include <intrin.h>
#endif
#pragma warning(push)
// disable warning C4146: unary minus operator applied
// to unsigned type, result still unsigned
@@ -247,28 +238,18 @@ static LIBDIVIDE_INLINE struct libdivide_u32_branchfree_t libdivide_u32_branchfr
static LIBDIVIDE_INLINE struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
static LIBDIVIDE_INLINE struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
static LIBDIVIDE_INLINE int16_t libdivide_s16_do_raw(
int16_t numer, int16_t magic, uint8_t more);
static LIBDIVIDE_INLINE int16_t libdivide_s16_do_raw(int16_t numer, int16_t magic, uint8_t more);
static LIBDIVIDE_INLINE int16_t libdivide_s16_do(
int16_t numer, const struct libdivide_s16_t *denom);
static LIBDIVIDE_INLINE uint16_t libdivide_u16_do_raw(
uint16_t numer, uint16_t magic, uint8_t more);
static LIBDIVIDE_INLINE uint16_t libdivide_u16_do_raw(uint16_t numer, uint16_t magic, uint8_t more);
static LIBDIVIDE_INLINE uint16_t libdivide_u16_do(
uint16_t numer, const struct libdivide_u16_t *denom);
static LIBDIVIDE_INLINE int32_t libdivide_s32_do_raw(
int32_t numer, int32_t magic, uint8_t more);
static LIBDIVIDE_INLINE int32_t libdivide_s32_do(
int32_t numer, const struct libdivide_s32_t *denom);
static LIBDIVIDE_INLINE uint32_t libdivide_u32_do_raw(
uint32_t numer, uint32_t magic, uint8_t more);
static LIBDIVIDE_INLINE uint32_t libdivide_u32_do(
uint32_t numer, const struct libdivide_u32_t *denom);
static LIBDIVIDE_INLINE int64_t libdivide_s64_do_raw(
int64_t numer, int64_t magic, uint8_t more);
static LIBDIVIDE_INLINE int64_t libdivide_s64_do(
int64_t numer, const struct libdivide_s64_t *denom);
static LIBDIVIDE_INLINE uint64_t libdivide_u64_do_raw(
uint64_t numer, uint64_t magic, uint8_t more);
static LIBDIVIDE_INLINE uint64_t libdivide_u64_do(
uint64_t numer, const struct libdivide_u64_t *denom);
@@ -334,7 +315,7 @@ static LIBDIVIDE_INLINE int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
}
static LIBDIVIDE_INLINE uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
#if defined(LIBDIVIDE_X64_INTRINSICS)
#if defined(LIBDIVIDE_VC) && defined(LIBDIVIDE_X86_64)
return __umulh(x, y);
#elif defined(HAS_INT128_T)
__uint128_t xl = x, yl = y;
@@ -360,7 +341,7 @@ static LIBDIVIDE_INLINE uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
}
static LIBDIVIDE_INLINE int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
#if defined(LIBDIVIDE_X64_INTRINSICS)
#if defined(LIBDIVIDE_VC) && defined(LIBDIVIDE_X86_64)
return __mulh(x, y);
#elif defined(HAS_INT128_T)
__int128_t xl = x, yl = y;
@@ -933,11 +914,12 @@ struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
return ret;
}
uint32_t libdivide_u32_do_raw(uint32_t numer, uint32_t magic, uint8_t more) {
if (!magic) {
uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
uint8_t more = denom->more;
if (!denom->magic) {
return numer >> more;
} else {
uint32_t q = libdivide_mullhi_u32(magic, numer);
uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
uint32_t t = ((numer - q) >> 1) + q;
return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
@@ -949,10 +931,6 @@ uint32_t libdivide_u32_do_raw(uint32_t numer, uint32_t magic, uint8_t more) {
}
}
uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
return libdivide_u32_do_raw(numer, denom->magic, denom->more);
}
uint32_t libdivide_u32_branchfree_do(
uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
@@ -1096,11 +1074,12 @@ struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
return ret;
}
uint64_t libdivide_u64_do_raw(uint64_t numer, uint64_t magic, uint8_t more) {
if (!magic) {
uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
uint8_t more = denom->more;
if (!denom->magic) {
return numer >> more;
} else {
uint64_t q = libdivide_mullhi_u64(magic, numer);
uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
uint64_t t = ((numer - q) >> 1) + q;
return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
@@ -1112,10 +1091,6 @@ uint64_t libdivide_u64_do_raw(uint64_t numer, uint64_t magic, uint8_t more) {
}
}
uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
return libdivide_u64_do_raw(numer, denom->magic, denom->more);
}
uint64_t libdivide_u64_branchfree_do(
uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
@@ -1455,10 +1430,11 @@ struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
return result;
}
int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
uint8_t more = denom->more;
uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
if (!magic) {
if (!denom->magic) {
uint32_t sign = (int8_t)more >> 7;
uint32_t mask = ((uint32_t)1 << shift) - 1;
uint32_t uq = numer + ((numer >> 31) & mask);
@@ -1467,7 +1443,7 @@ int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
q = (q ^ sign) - sign;
return q;
} else {
uint32_t uq = (uint32_t)libdivide_mullhi_s32(magic, numer);
uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
// must be arithmetic shift and then sign extend
int32_t sign = (int8_t)more >> 7;
@@ -1482,10 +1458,6 @@ int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
}
}
int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
return libdivide_s32_do_raw(numer, denom->magic, denom->more);
}
int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
uint8_t more = denom->more;
uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
@@ -1627,10 +1599,11 @@ struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
return ret;
}
int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
uint8_t more = denom->more;
uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
if (!magic) { // shift path
if (!denom->magic) { // shift path
uint64_t mask = ((uint64_t)1 << shift) - 1;
uint64_t uq = numer + ((numer >> 63) & mask);
int64_t q = (int64_t)uq;
@@ -1640,7 +1613,7 @@ int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
q = (q ^ sign) - sign;
return q;
} else {
uint64_t uq = (uint64_t)libdivide_mullhi_s64(magic, numer);
uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
if (more & LIBDIVIDE_ADD_MARKER) {
// must be arithmetic shift and then sign extend
int64_t sign = (int8_t)more >> 7;
@@ -1655,10 +1628,6 @@ int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
}
}
int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
return libdivide_s64_do_raw(numer, denom->magic, denom->more);
}
int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
uint8_t more = denom->more;
uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;

util.h

@@ -9,9 +9,7 @@
#define noreturn __attribute__((noreturn))
#define likely(x) __builtin_expect(!!(x), 1)
#define likely51(x) __builtin_expect_with_probability(!!(x), 1, 0.51)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define unlikely51(x) __builtin_expect_with_probability(!!(x), 0, 0.51)
#define min(x, y) ({ \
__typeof__(x) _x = (x); \
@@ -32,13 +30,6 @@
#define STRINGIFY(s) #s
#define ALIAS(f) __attribute__((alias(STRINGIFY(f))))
// supported since GCC 15
#if __has_attribute (nonstring)
# define NONSTRING __attribute__ ((nonstring))
#else
# define NONSTRING
#endif
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
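As background for the `likely51`/`unlikely51` macros that appear on only one side of this hunk (and for the `likely51(is_memtag_enabled())` checks in the h_malloc.c hunks above): `__builtin_expect_with_probability(..., 0.51)` tells the compiler the condition is only barely biased towards true, so neither branch is laid out as a cold path the way plain `likely()`/`unlikely()` would cause. A minimal, self-contained sketch follows; the `is_memtag_enabled` stub is hypothetical, and the builtin needs GCC 9+ or a recent Clang.

```c
#include <stdbool.h>
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define likely51(x) __builtin_expect_with_probability(!!(x), 1, 0.51)

/* Hypothetical stand-in; the real allocator reads a runtime flag. */
static bool is_memtag_enabled(void) {
    return false;
}

int main(void) {
    /* Hinted as ~51% likely: the compiler treats the branch as nearly
       unpredictable instead of marking either path as cold. */
    if (likely51(is_memtag_enabled())) {
        puts("memory tagging path");
    } else {
        puts("untagged path");
    }
    return 0;
}
```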