Mirror of https://github.com/GrapheneOS/hardened_malloc.git, synced 2025-12-10 00:16:32 +01:00

Comparing "main" and "2024121200": no commits in common; the two refs have entirely different histories.

10 changed files with 54 additions and 97 deletions
.github/workflows/build-and-test.yml (vendored): 14 changes

@@ -11,9 +11,9 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        version: [14]
+        version: [12]
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
       - name: Setting up gcc version
         run: |
           sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${{ matrix.version }} 100

@@ -24,11 +24,9 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        version: [19, 20]
+        version: [14, 15]
     steps:
-      - uses: actions/checkout@v6
-      - name: Install dependencies
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends clang-19 clang-20
+      - uses: actions/checkout@v4
       - name: Setting up clang version
         run: |
           sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{ matrix.version }} 100

@@ -40,7 +38,7 @@ jobs:
     container:
       image: alpine:latest
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
       - name: Install dependencies
         run: apk update && apk add build-base python3
       - name: Build

@@ -48,7 +46,7 @@ jobs:
   build-ubuntu-gcc-aarch64:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgcc-s1-arm64-cross cpp-aarch64-linux-gnu
      - name: Build
.gitignore (vendored): 4 changes

@@ -1,2 +1,2 @@
-/out/
-/out-light/
+out/
+out-light/
LICENSE: 2 changes

@@ -1,4 +1,4 @@
-Copyright © 2018-2025 GrapheneOS
+Copyright © 2018-2024 GrapheneOS
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md: 43 changes

@@ -1,4 +1,4 @@
-# hardened_malloc
+# Hardened malloc
 
 * [Introduction](#introduction)
 * [Dependencies](#dependencies)
@@ -65,14 +65,14 @@ used instead as this allocator fundamentally doesn't support that environment.
 
 ## Dependencies
 
-Debian stable (currently Debian 13) determines the most ancient set of
+Debian stable (currently Debian 12) determines the most ancient set of
 supported dependencies:
 
-* glibc 2.41
-* Linux 6.12
-* Clang 19.1.7 or GCC 14.2.0
+* glibc 2.36
+* Linux 6.1
+* Clang 14.0.6 or GCC 12.2.0
 
-For Android, the Linux GKI 6.1, 6.6 and 6.12 branches are supported.
+For Android, the Linux GKI 5.10, 5.15 and 6.1 branches are supported.
 
 However, using more recent releases is highly recommended. Older versions of
 the dependencies may be compatible at the moment but are not tested and will
@@ -83,7 +83,7 @@ there will be custom integration offering better performance in the future
 along with other hardening for the C standard library implementation.
 
 For Android, only the current generation, actively developed maintenance branch of the Android
-Open Source Project will be supported, which currently means `android16-qpr1-release`.
+Open Source Project will be supported, which currently means `android15-release`.
 
 ## Testing
 
@@ -159,17 +159,14 @@ line to the `/etc/ld.so.preload` configuration file:
 The format of this configuration file is a whitespace-separated list, so it's
 good practice to put each library on a separate line.
 
-For maximum compatibility `libhardened_malloc.so` can be installed into
-`/usr/lib/` to avoid preload failures caused by AppArmor profiles or systemd
-ExecPaths= restrictions. Check for logs of the following format:
-
-    ERROR: ld.so: object '/usr/local/lib/libhardened_malloc.so' from /etc/ld.so.preload cannot be preloaded (failed to map segment from shared object): ignored.
+On Debian systems `libhardened_malloc.so` should be installed into `/usr/lib/`
+to avoid preload failures caused by AppArmor profile restrictions.
 
 Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis
 will not work when `AT_SECURE` is set such as with setuid binaries. It's also
 generally not a recommended approach for production usage. The recommendation
 is to enable it globally and make exceptions for performance critical cases by
-running the application in a container/namespace without it enabled.
+running the application in a container / namespace without it enabled.
 
 Make sure to raise `vm.max_map_count` substantially too to accommodate the very
 large number of guard pages created by hardened\_malloc. As an example, in
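A quick way to confirm that the preload actually took effect is to walk the loaded objects with glibc's `dl_iterate_phdr(3)`. This is a hedged diagnostic sketch, not something shipped with hardened_malloc:

    #define _GNU_SOURCE
    #include <link.h>
    #include <stdio.h>
    #include <string.h>

    // Reports whether any loaded shared object looks like hardened_malloc.
    static int check_object(struct dl_phdr_info *info, size_t size, void *data) {
        (void) size;
        if (strstr(info->dlpi_name, "libhardened_malloc")) {
            *(int *) data = 1;
        }
        return 0; // keep iterating
    }

    int main(void) {
        int found = 0;
        dl_iterate_phdr(check_object, &found);
        puts(found ? "hardened_malloc is loaded" : "hardened_malloc is NOT loaded");
        return 0;
    }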
@@ -255,7 +252,7 @@ The following boolean configuration options are available:
 * `CONFIG_WRITE_AFTER_FREE_CHECK`: `true` (default) or `false` to control
   sanity checking that new small allocations contain zeroed memory. This can
   detect writes caused by a write-after-free vulnerability and mixes well with
-  the features for making memory reuse randomized/delayed. This has a
+  the features for making memory reuse randomized / delayed. This has a
   performance cost scaling to the size of the allocation, which is usually
   acceptable. This is not relevant to large allocations because they're always
   a fresh memory mapping from the kernel.
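To make the mechanism concrete, here is an illustrative sketch of such a zero-fill sanity check (simplified; the real logic lives in h_malloc.c and reports a fatal allocator error):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    // Slots are zeroed on free, so any non-zero byte observed when the slot
    // is handed out again means something wrote through a dangling pointer.
    static void check_zeroed(const void *p, size_t size) {
        const uint8_t *bytes = p;
        for (size_t i = 0; i < size; i++) {
            if (bytes[i] != 0) {
                abort(); // stand-in for the allocator's fatal error path
            }
        }
    }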
@@ -341,7 +338,7 @@ larger caches can substantially improves performance).
 
 ## Core design
 
-The core design of the allocator is very simple/minimalist. The allocator is
+The core design of the allocator is very simple / minimalist. The allocator is
 exclusive to 64-bit platforms in order to take full advantage of the abundant
 address space without being constrained by needing to keep the design
 compatible with 32-bit.
@@ -373,13 +370,13 @@ whether it's free, along with a separate bitmap for tracking allocations in the
 quarantine. The slab metadata entries in the array have intrusive lists
 threaded through them to track partial slabs (partially filled, and these are
 the first choice for allocation), empty slabs (limited amount of cached free
-memory) and free slabs (purged/memory protected).
+memory) and free slabs (purged / memory protected).
 
 Large allocations are tracked via a global hash table mapping their address to
 their size and random guard size. They're simply memory mappings and get mapped
 on allocation and then unmapped on free. Large allocations are the only dynamic
 memory mappings made by the allocator, since the address space for allocator
-state (including both small/large allocation metadata) and slab allocations
+state (including both small / large allocation metadata) and slab allocations
 is statically reserved.
 
 This allocator is aimed at production usage, not aiding with finding and fixing
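A hypothetical, heavily simplified rendering of the metadata entry described here (field names invented for illustration; the real definition is in h_malloc.c):

    #include <stdint.h>

    struct slab_metadata {
        uint64_t bitmap[4];            // allocation state of each slot in the slab
        uint64_t quarantine_bitmap[4]; // slots currently held in the quarantine
        struct slab_metadata *next;    // intrusive links for the partial/empty/free
        struct slab_metadata *prev;    // slab lists described above
    };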
@@ -390,7 +387,7 @@ messages. The design choices are based around minimizing overhead and
 maximizing security which often leads to different decisions than a tool
 attempting to find bugs. For example, it uses zero-based sanitization on free
 and doesn't minimize slack space from size class rounding between the end of an
-allocation and the canary/guard region. Zero-based filling has the least
+allocation and the canary / guard region. Zero-based filling has the least
 chance of uncovering latent bugs, but also the best chance of mitigating
 vulnerabilities. The canary feature is primarily meant to act as padding
 absorbing small overflows to render them harmless, so slack space is helpful
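As an illustration of the canary scheme (hypothetical helper names, simplified relative to the real implementation):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define CANARY_SIZE 8

    // Write the canary into the slack at the end of the slot on allocation...
    static void set_canary(void *slot, size_t slot_size, uint64_t canary_value) {
        memcpy((char *) slot + slot_size - CANARY_SIZE, &canary_value, CANARY_SIZE);
    }

    // ...and verify it on free: a small overflow lands in the slack/canary
    // region instead of adjacent data, and is detected here.
    static void check_canary(const void *slot, size_t slot_size, uint64_t canary_value) {
        uint64_t stored;
        memcpy(&stored, (const char *) slot + slot_size - CANARY_SIZE, sizeof(stored));
        if (stored != canary_value) {
            abort(); // stand-in for the allocator's fatal error report
        }
    }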
@@ -424,11 +421,11 @@ was a bit less important and if a core goal was finding latent bugs.
 * Top-level isolated regions for each arena
   * Divided up into isolated inner regions for each size class
     * High entropy random base for each size class region
-    * No deterministic/low entropy offsets between allocations with
+    * No deterministic / low entropy offsets between allocations with
       different size classes
 * Metadata is completely outside the slab allocation region
   * No references to metadata within the slab allocation region
-  * No deterministic/low entropy offsets to metadata
+  * No deterministic / low entropy offsets to metadata
 * Entire slab region starts out non-readable and non-writable
 * Slabs beyond the cache limit are purged and become non-readable and
   non-writable memory again
|
@ -649,7 +646,7 @@ other. Static assignment can also reduce memory usage since threads may have
|
|||
varying usage of size classes.
|
||||
|
||||
When there's substantial allocation or deallocation pressure, the allocator
|
||||
does end up calling into the kernel to purge/protect unused slabs by
|
||||
does end up calling into the kernel to purge / protect unused slabs by
|
||||
replacing them with fresh `PROT_NONE` regions along with unprotecting slabs
|
||||
when partially filled and cached empty slabs are depleted. There will be
|
||||
configuration over the amount of cached empty slabs, but it's not entirely a
|
||||
|
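The purge / protect cycle can be sketched with the plain POSIX mapping calls it is described in terms of (a simplified sketch, not the allocator's actual code in memory.c):

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve address space with no access; slabs are unprotected on demand.
    static void *reserve_region(size_t size) {
        return mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    // Make a slab usable when it's needed for allocations.
    static int activate_slab(void *slab, size_t size) {
        return mprotect(slab, size, PROT_READ | PROT_WRITE);
    }

    // Purge a slab past the cache limit: a fresh PROT_NONE mapping drops the
    // pages and makes the memory inaccessible again.
    static int purge_slab(void *slab, size_t size) {
        void *p = mmap(slab, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        return p == MAP_FAILED ? -1 : 0;
    }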
@@ -696,7 +693,7 @@ The secondary benefit of thread caches is being able to avoid the underlying
 allocator implementation entirely for some allocations and deallocations when
 they're mixed together rather than many allocations being done together or many
 frees being done together. The value of this depends a lot on the application
-and it's entirely unsuitable/incompatible with a hardened allocator since it
+and it's entirely unsuitable / incompatible with a hardened allocator since it
 bypasses all of the underlying security and would destroy much of the security
 value.
@@ -960,7 +957,7 @@ doesn't handle large allocations within the arenas, so it presents those in the
 For example, with 4 arenas enabled, there will be a 5th arena in the statistics
 for the large allocations.
 
-The `nmalloc`/`ndalloc` fields are 64-bit integers tracking allocation and
+The `nmalloc` / `ndalloc` fields are 64-bit integers tracking allocation and
 deallocation count. These are defined as wrapping on overflow, per the jemalloc
 implementation.
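Wrapping here is just C's well-defined unsigned overflow, e.g.:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t nmalloc = UINT64_MAX;
        nmalloc += 1; // unsigned overflow wraps to 0, per the jemalloc definition
        assert(nmalloc == 0);
        return 0;
    }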
[file name not shown in the rendered diff]

@@ -44,7 +44,7 @@ void *set_pointer_tag(void *ptr, u8 tag) {
     return (void *) (((uintptr_t) tag << 56) | (uintptr_t) untag_pointer(ptr));
 }
 
-// This test checks that slab slot allocation uses tag that is distinct from tags of its neighbors
+// This test checks that slab slot allocation uses tag that is distint from tags of its neighbors
 // and from the tag of the previous allocation that used the same slot
 void tag_distinctness() {
     // tag 0 is reserved
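For context, `set_pointer_tag` places the MTE tag in the pointer's top byte. A small usage sketch, assuming the helpers shown in the hunk above are in scope (values illustrative):

    #include <stdint.h>

    void example(void) {
        void *p = (void *) (uintptr_t) 0x0000ffff12345678u;
        void *tagged = set_pointer_tag(p, 5);
        // tagged == (void *) 0x0500ffff12345678: the tag occupies bits 56-59,
        // and untag_pointer(tagged) recovers the original address
        (void) tagged;
    }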
chacha.c: 2 changes

@@ -41,7 +41,7 @@ static const unsigned rounds = 8;
     a = PLUS(a, b); d = ROTATE(XOR(d, a), 8); \
     c = PLUS(c, d); b = ROTATE(XOR(b, c), 7);
 
-static const char sigma[16] NONSTRING = "expand 32-byte k";
+static const char sigma[16] = "expand 32-byte k";
 
 void chacha_keysetup(chacha_ctx *x, const u8 *k) {
     x->input[0] = U8TO32_LITTLE(sigma + 0);
[file name not shown in the rendered diff; the hunk is from the Python test suite]

@@ -98,7 +98,7 @@ class TestSimpleMemoryCorruption(unittest.TestCase):
         self.assertEqual(stderr.decode("utf-8"),
             "fatal allocator error: invalid free\n")
 
-    def test_invalid_malloc_usable_size_small_quarantine(self):
+    def test_invalid_malloc_usable_size_small_quarantene(self):
         _stdout, stderr, returncode = self.run_test(
             "invalid_malloc_usable_size_small_quarantine")
         self.assertEqual(returncode, -6)
third_party/libdivide.h (vendored): 75 changes

@@ -11,11 +11,9 @@
 #ifndef LIBDIVIDE_H
 #define LIBDIVIDE_H
 
-// *** Version numbers are auto generated - do not edit ***
-#define LIBDIVIDE_VERSION "5.2.0"
+#define LIBDIVIDE_VERSION "5.1"
 #define LIBDIVIDE_VERSION_MAJOR 5
-#define LIBDIVIDE_VERSION_MINOR 2
-#define LIBDIVIDE_VERSION_PATCH 0
+#define LIBDIVIDE_VERSION_MINOR 1
 
 #include <stdint.h>
@@ -36,15 +34,8 @@
 #include <arm_neon.h>
 #endif
 
-// Clang-cl prior to Visual Studio 2022 doesn't include __umulh/__mulh intrinsics
-#if defined(_MSC_VER) && defined(LIBDIVIDE_X86_64) && (!defined(__clang__) || _MSC_VER>1930)
-#define LIBDIVIDE_X64_INTRINSICS
-#endif
-
 #if defined(_MSC_VER)
-#if defined(LIBDIVIDE_X64_INTRINSICS)
 #include <intrin.h>
-#endif
 #pragma warning(push)
 // disable warning C4146: unary minus operator applied
 // to unsigned type, result still unsigned
@@ -247,28 +238,18 @@ static LIBDIVIDE_INLINE struct libdivide_u32_branchfree_t libdivide_u32_branchfr
 static LIBDIVIDE_INLINE struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
 static LIBDIVIDE_INLINE struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
 
-static LIBDIVIDE_INLINE int16_t libdivide_s16_do_raw(
-    int16_t numer, int16_t magic, uint8_t more);
+static LIBDIVIDE_INLINE int16_t libdivide_s16_do_raw(int16_t numer, int16_t magic, uint8_t more);
 static LIBDIVIDE_INLINE int16_t libdivide_s16_do(
     int16_t numer, const struct libdivide_s16_t *denom);
-static LIBDIVIDE_INLINE uint16_t libdivide_u16_do_raw(
-    uint16_t numer, uint16_t magic, uint8_t more);
+static LIBDIVIDE_INLINE uint16_t libdivide_u16_do_raw(uint16_t numer, uint16_t magic, uint8_t more);
 static LIBDIVIDE_INLINE uint16_t libdivide_u16_do(
     uint16_t numer, const struct libdivide_u16_t *denom);
-static LIBDIVIDE_INLINE int32_t libdivide_s32_do_raw(
-    int32_t numer, int32_t magic, uint8_t more);
 static LIBDIVIDE_INLINE int32_t libdivide_s32_do(
     int32_t numer, const struct libdivide_s32_t *denom);
-static LIBDIVIDE_INLINE uint32_t libdivide_u32_do_raw(
-    uint32_t numer, uint32_t magic, uint8_t more);
 static LIBDIVIDE_INLINE uint32_t libdivide_u32_do(
     uint32_t numer, const struct libdivide_u32_t *denom);
-static LIBDIVIDE_INLINE int64_t libdivide_s64_do_raw(
-    int64_t numer, int64_t magic, uint8_t more);
 static LIBDIVIDE_INLINE int64_t libdivide_s64_do(
     int64_t numer, const struct libdivide_s64_t *denom);
-static LIBDIVIDE_INLINE uint64_t libdivide_u64_do_raw(
-    uint64_t numer, uint64_t magic, uint8_t more);
 static LIBDIVIDE_INLINE uint64_t libdivide_u64_do(
     uint64_t numer, const struct libdivide_u64_t *denom);
@@ -334,7 +315,7 @@ static LIBDIVIDE_INLINE int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
 }
 
 static LIBDIVIDE_INLINE uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
-#if defined(LIBDIVIDE_X64_INTRINSICS)
+#if defined(LIBDIVIDE_VC) && defined(LIBDIVIDE_X86_64)
     return __umulh(x, y);
 #elif defined(HAS_INT128_T)
     __uint128_t xl = x, yl = y;

@@ -360,7 +341,7 @@ static LIBDIVIDE_INLINE uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
 }
 
 static LIBDIVIDE_INLINE int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
-#if defined(LIBDIVIDE_X64_INTRINSICS)
+#if defined(LIBDIVIDE_VC) && defined(LIBDIVIDE_X86_64)
     return __mulh(x, y);
 #elif defined(HAS_INT128_T)
     __int128_t xl = x, yl = y;
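Both branches compute the high half of the full-width product; in terms of `__uint128_t` (the `HAS_INT128_T` path shown above) that is:

    #include <stdint.h>

    // High 64 bits of the 128-bit product x * y, as used for the magic-number
    // multiply. Assumes compiler support for __uint128_t.
    static uint64_t mullhi_u64_portable(uint64_t x, uint64_t y) {
        __uint128_t product = (__uint128_t) x * y;
        return (uint64_t) (product >> 64);
    }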
@@ -933,11 +914,12 @@ struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
     return ret;
 }
 
-uint32_t libdivide_u32_do_raw(uint32_t numer, uint32_t magic, uint8_t more) {
-    if (!magic) {
+uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
         return numer >> more;
     } else {
-        uint32_t q = libdivide_mullhi_u32(magic, numer);
+        uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
         if (more & LIBDIVIDE_ADD_MARKER) {
             uint32_t t = ((numer - q) >> 1) + q;
             return t >> (more & LIBDIVIDE_32_SHIFT_MASK);

@@ -949,10 +931,6 @@ uint32_t libdivide_u32_do_raw(uint32_t numer, uint32_t magic, uint8_t more) {
     }
 }
 
-uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
-    return libdivide_u32_do_raw(numer, denom->magic, denom->more);
-}
-
 uint32_t libdivide_u32_branchfree_do(
     uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
     uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
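Typical use of this API, for reference (a sketch: `libdivide_u32_gen` precomputes the `magic` and `more` fields consumed by `libdivide_u32_do`):

    #include <stddef.h>
    #include <stdint.h>
    #include "third_party/libdivide.h"

    // Replace many divisions by the same runtime divisor with cheaper
    // multiply-and-shift sequences. d must be non-zero.
    uint32_t sum_of_quotients(const uint32_t *values, size_t n, uint32_t d) {
        struct libdivide_u32_t denom = libdivide_u32_gen(d);
        uint32_t sum = 0;
        for (size_t i = 0; i < n; i++) {
            sum += libdivide_u32_do(values[i], &denom); // == values[i] / d
        }
        return sum;
    }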
@@ -1096,11 +1074,12 @@ struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
     return ret;
 }
 
-uint64_t libdivide_u64_do_raw(uint64_t numer, uint64_t magic, uint8_t more) {
-    if (!magic) {
+uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
         return numer >> more;
     } else {
-        uint64_t q = libdivide_mullhi_u64(magic, numer);
+        uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
         if (more & LIBDIVIDE_ADD_MARKER) {
             uint64_t t = ((numer - q) >> 1) + q;
             return t >> (more & LIBDIVIDE_64_SHIFT_MASK);

@@ -1112,10 +1091,6 @@ uint64_t libdivide_u64_do_raw(uint64_t numer, uint64_t magic, uint8_t more) {
     }
 }
 
-uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
-    return libdivide_u64_do_raw(numer, denom->magic, denom->more);
-}
-
 uint64_t libdivide_u64_branchfree_do(
     uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
     uint64_t q = libdivide_mullhi_u64(denom->magic, numer);

@@ -1455,10 +1430,11 @@ struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
     return result;
 }
 
-int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
 
-    if (!magic) {
+    if (!denom->magic) {
         uint32_t sign = (int8_t)more >> 7;
         uint32_t mask = ((uint32_t)1 << shift) - 1;
         uint32_t uq = numer + ((numer >> 31) & mask);

@@ -1467,7 +1443,7 @@ int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
         q = (q ^ sign) - sign;
         return q;
     } else {
-        uint32_t uq = (uint32_t)libdivide_mullhi_s32(magic, numer);
+        uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
         if (more & LIBDIVIDE_ADD_MARKER) {
             // must be arithmetic shift and then sign extend
             int32_t sign = (int8_t)more >> 7;

@@ -1482,10 +1458,6 @@ int32_t libdivide_s32_do_raw(int32_t numer, int32_t magic, uint8_t more) {
     }
 }
 
-int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
-    return libdivide_s32_do_raw(numer, denom->magic, denom->more);
-}
-
 int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
     uint8_t more = denom->more;
     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;

@@ -1627,10 +1599,11 @@ struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
     return ret;
 }
 
-int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
 
-    if (!magic) { // shift path
+    if (!denom->magic) { // shift path
         uint64_t mask = ((uint64_t)1 << shift) - 1;
         uint64_t uq = numer + ((numer >> 63) & mask);
         int64_t q = (int64_t)uq;

@@ -1640,7 +1613,7 @@ int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
         q = (q ^ sign) - sign;
         return q;
     } else {
-        uint64_t uq = (uint64_t)libdivide_mullhi_s64(magic, numer);
+        uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
         if (more & LIBDIVIDE_ADD_MARKER) {
             // must be arithmetic shift and then sign extend
             int64_t sign = (int8_t)more >> 7;

@@ -1655,10 +1628,6 @@ int64_t libdivide_s64_do_raw(int64_t numer, int64_t magic, uint8_t more) {
     }
 }
 
-int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
-    return libdivide_s64_do_raw(numer, denom->magic, denom->more);
-}
-
 int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
     uint8_t more = denom->more;
     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
util.h: 7 changes

@@ -32,13 +32,6 @@
 #define STRINGIFY(s) #s
 #define ALIAS(f) __attribute__((alias(STRINGIFY(f))))
 
-// supported since GCC 15
-#if __has_attribute (nonstring)
-# define NONSTRING __attribute__ ((nonstring))
-#else
-# define NONSTRING
-#endif
-
 typedef uint8_t u8;
 typedef uint16_t u16;
 typedef uint32_t u32;
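This is the macro whose only user is the `sigma` array in chacha.c above: "expand 32-byte k" is exactly 16 characters, so a `char[16]` holds it without a NUL terminator, which trips the unterminated-string-initializer warning introduced in GCC 15 unless the array carries the `nonstring` attribute. A minimal standalone sketch of the pattern:

    // Guarded fallback so compilers lacking __has_attribute still build:
    #ifndef __has_attribute
    #define __has_attribute(x) 0
    #endif

    #if __has_attribute(nonstring)
    #define NONSTRING __attribute__((nonstring))
    #else
    #define NONSTRING
    #endif

    // Exactly 16 bytes, deliberately not NUL-terminated (ChaCha key setup
    // reads it as four 32-bit words, never as a C string):
    static const char sigma[16] NONSTRING = "expand 32-byte k";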