Mirror of https://github.com/GrapheneOS/hardened_malloc.git (synced 2025-04-19 22:10:19 +02:00)

Compare commits: PQ3A.19060...main (358 commits)
86 changed files with 5698 additions and 2114 deletions
.clang-tidy (new file, 2 additions)

@@ -0,0 +1,2 @@
Checks: 'bugprone-*,-bugprone-easily-swappable-parameters,-bugprone-macro-parentheses,-bugprone-too-small-loop-variable,cert-*,-cert-err33-c,clang-analyzer-*,-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,-clang-diagnostic-constant-logical-operand,readability-*,-readability-function-cognitive-complexity,-readability-identifier-length,-readability-inconsistent-declaration-parameter-name,-readability-magic-numbers,-readability-named-parameter,llvm-include-order,misc-*'
WarningsAsErrors: '*'
.github/dependabot.yml (new file, vendored, 7 additions)

@@ -0,0 +1,7 @@
version: 2
updates:
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
    target-branch: main
.github/workflows/build-and-test.yml (new file, vendored, 55 additions)

@@ -0,0 +1,55 @@
name: Build and run tests

on:
  push:
  pull_request:
  schedule:
    - cron: '0 2 * * *'

jobs:
  build-ubuntu-gcc:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        version: [12, 13, 14]
    steps:
      - uses: actions/checkout@v4
      - name: Setting up gcc version
        run: |
          sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${{ matrix.version }} 100
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ matrix.version }} 100
      - name: Build
        run: make test
  build-ubuntu-clang:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        version: [14, 15, 16, 17, 18]
    steps:
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends clang-14 clang-15
      - name: Setting up clang version
        run: |
          sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{ matrix.version }} 100
          sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${{ matrix.version }} 100
      - name: Build
        run: CC=clang CXX=clang++ make test
  build-musl:
    runs-on: ubuntu-latest
    container:
      image: alpine:latest
    steps:
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: apk update && apk add build-base python3
      - name: Build
        run: make test
  build-ubuntu-gcc-aarch64:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgcc-s1-arm64-cross cpp-aarch64-linux-gnu
      - name: Build
        run: CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-gcc++ make CONFIG_NATIVE=false
.gitignore (vendored, 2 additions, 2 deletions)

@@ -1,2 +1,2 @@
-*.o
-*.so
+out/
+out-light/
36
Android.bp
36
Android.bp
|
@ -1,15 +1,14 @@
|
|||
common_cflags = [
|
||||
"-pipe",
|
||||
"-O3",
|
||||
//"-flto",
|
||||
"-fPIC",
|
||||
"-fvisibility=hidden",
|
||||
//"-fno-plt",
|
||||
"-pipe",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-Wcast-align",
|
||||
"-Wcast-qual",
|
||||
"-Wwrite-strings",
|
||||
"-Werror",
|
||||
"-DH_MALLOC_PREFIX",
|
||||
"-DZERO_ON_FREE=true",
|
||||
"-DWRITE_AFTER_FREE_CHECK=true",
|
||||
|
@ -21,20 +20,21 @@ common_cflags = [
|
|||
"-DCONFIG_LARGE_SIZE_CLASSES=true",
|
||||
"-DGUARD_SLABS_INTERVAL=1",
|
||||
"-DGUARD_SIZE_DIVISOR=2",
|
||||
"-DREGION_QUARANTINE_RANDOM_LENGTH=128",
|
||||
"-DREGION_QUARANTINE_RANDOM_LENGTH=256",
|
||||
"-DREGION_QUARANTINE_QUEUE_LENGTH=1024",
|
||||
"-DREGION_QUARANTINE_SKIP_THRESHOLD=33554432", // 32MiB
|
||||
"-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=32",
|
||||
"-DCONFIG_CLASS_REGION_SIZE=1073741824", // 1GiB
|
||||
"-DCONFIG_CLASS_REGION_SIZE=34359738368", // 32GiB
|
||||
"-DN_ARENA=1",
|
||||
"-DCONFIG_STATS=true",
|
||||
"-DCONFIG_SELF_INIT=false",
|
||||
]
|
||||
|
||||
cc_defaults {
|
||||
name: "hardened_malloc_defaults",
|
||||
defaults: ["linux_bionic_supported"],
|
||||
cflags: common_cflags,
|
||||
conlyflags: ["-std=c11", "-Wmissing-prototypes"],
|
||||
conlyflags: ["-std=c17", "-Wmissing-prototypes"],
|
||||
stl: "none",
|
||||
}
|
||||
|
||||
|
@ -47,13 +47,35 @@ lib_src_files = [
|
|||
"util.c",
|
||||
]
|
||||
|
||||
cc_library_static {
|
||||
cc_library {
|
||||
name: "libhardened_malloc",
|
||||
ramdisk_available: true,
|
||||
vendor_ramdisk_available: true,
|
||||
recovery_available: true,
|
||||
defaults: ["hardened_malloc_defaults"],
|
||||
srcs: lib_src_files,
|
||||
export_include_dirs: ["include"],
|
||||
static_libs: ["libasync_safe"],
|
||||
target: {
|
||||
android: {
|
||||
shared: {
|
||||
enabled: false,
|
||||
},
|
||||
system_shared_libs: [],
|
||||
},
|
||||
linux_bionic: {
|
||||
system_shared_libs: [],
|
||||
},
|
||||
},
|
||||
product_variables: {
|
||||
debuggable: {
|
||||
cflags: ["-DLABEL_MEMORY"],
|
||||
},
|
||||
device_has_arm_mte: {
|
||||
cflags: ["-DHAS_ARM_MTE", "-march=armv8-a+dotprod+memtag"]
|
||||
},
|
||||
},
|
||||
apex_available: [
|
||||
"com.android.runtime",
|
||||
],
|
||||
}
|
||||
|
|
232
CREDITS
232
CREDITS
|
@ -4,7 +4,7 @@ chacha.c is a simple conversion of chacha-merged.c to a keystream-only implement
|
|||
D. J. Bernstein
|
||||
Public domain.
|
||||
|
||||
malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find, regions_delete):
|
||||
h_malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find, regions_delete):
|
||||
|
||||
Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
|
||||
Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
|
||||
|
@ -25,7 +25,8 @@ malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find,
|
|||
|
||||
libdivide:
|
||||
|
||||
Copyright (C) 2010 ridiculous_fish
|
||||
Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
|
||||
Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
|
||||
|
||||
Boost Software License - Version 1.0 - August 17th, 2003
|
||||
|
||||
|
@ -53,3 +54,230 @@ libdivide:
|
|||
|
||||
random.c get_random_{type}_uniform functions are based on Fast Random Integer
|
||||
Generation in an Interval by Daniel Lemire
|
||||
|
||||
arm_mte.h arm_mte_tag_and_clear_mem function contents were copied from storeTags function in scudo:
|
||||
|
||||
==============================================================================
|
||||
The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
|
||||
==============================================================================
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
---- LLVM Exceptions to the Apache 2.0 License ----
|
||||
|
||||
As an exception, if, as a result of your compiling your source code, portions
|
||||
of this Software are embedded into an Object form of such source code, you
|
||||
may redistribute such embedded portions in such Object form without complying
|
||||
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
|
||||
|
||||
In addition, if you combine or link compiled forms of this Software with
|
||||
software that is licensed under the GPLv2 ("Combined Software") and if a
|
||||
court of competent jurisdiction determines that the patent provision (Section
|
||||
3), the indemnity provision (Section 9) or other Section of the License
|
||||
conflicts with the conditions of the GPLv2, you may retroactively and
|
||||
prospectively choose to deem waived or otherwise exclude such Section(s) of
|
||||
the License, but only in their entirety and only with respect to the Combined
|
||||
Software.
|
||||
|
||||
==============================================================================
|
||||
|
|
|
@ -16,6 +16,8 @@ Somewhat important and an easy sell:
|
|||
* also needed by jemalloc for different reasons
|
||||
* not needed if the kernel gets first class support for arbitrarily sized
|
||||
guard pages and a virtual memory quarantine feature
|
||||
* `MREMAP_DONTUNMAP` is now available but doesn't support expanding the
|
||||
mapping which may be an issue due to VMA merging being unreliable
|
||||
|
||||
Fairly infeasible to land but could reduce overhead and extend coverage of
|
||||
security features to other code directly using mmap:
|
||||
|
|
LICENSE (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-Copyright (c) 2019 Daniel Micay
+Copyright © 2018-2025 GrapheneOS
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
119
Makefile
119
Makefile
|
@ -1,57 +1,60 @@
|
|||
CONFIG_NATIVE := true
|
||||
CONFIG_CXX_ALLOCATOR := true
|
||||
CONFIG_UBSAN := false
|
||||
CONFIG_SEAL_METADATA := false
|
||||
CONFIG_ZERO_ON_FREE := true
|
||||
CONFIG_WRITE_AFTER_FREE_CHECK := true
|
||||
CONFIG_SLOT_RANDOMIZE := true
|
||||
CONFIG_SLAB_CANARY := true
|
||||
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 1
|
||||
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 1
|
||||
CONFIG_EXTENDED_SIZE_CLASSES := true
|
||||
CONFIG_LARGE_SIZE_CLASSES := true
|
||||
CONFIG_GUARD_SLABS_INTERVAL := 1
|
||||
CONFIG_GUARD_SIZE_DIVISOR := 2
|
||||
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 128
|
||||
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
|
||||
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
|
||||
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
|
||||
CONFIG_CLASS_REGION_SIZE := 137438953472 # 128GiB
|
||||
CONFIG_N_ARENA := 4
|
||||
CONFIG_STATS := false
|
||||
VARIANT := default
|
||||
|
||||
ifneq ($(VARIANT),)
|
||||
CONFIG_FILE := config/$(VARIANT).mk
|
||||
include config/$(VARIANT).mk
|
||||
endif
|
||||
|
||||
ifeq ($(VARIANT),default)
|
||||
SUFFIX :=
|
||||
else
|
||||
SUFFIX := -$(VARIANT)
|
||||
endif
|
||||
|
||||
OUT := out$(SUFFIX)
|
||||
|
||||
define safe_flag
|
||||
$(shell $(CC) -E $1 - </dev/null >/dev/null 2>&1 && echo $1 || echo $2)
|
||||
$(shell $(CC) $(if $(filter clang%,$(CC)),-Werror=unknown-warning-option) -E $1 - </dev/null >/dev/null 2>&1 && echo $1 || echo $2)
|
||||
endef
|
||||
|
||||
CPPFLAGS := -D_GNU_SOURCE
|
||||
SHARED_FLAGS := -O3 -flto -fPIC -fvisibility=hidden -fno-plt -pipe -Wall -Wextra $(call safe_flag,-Wcast-align=strict) -Wcast-qual -Wwrite-strings
|
||||
CPPFLAGS := $(CPPFLAGS) -D_GNU_SOURCE -I include
|
||||
SHARED_FLAGS := -pipe -O3 -flto -fPIC -fvisibility=hidden -fno-plt \
|
||||
-fstack-clash-protection $(call safe_flag,-fcf-protection) -fstack-protector-strong \
|
||||
-Wall -Wextra $(call safe_flag,-Wcast-align=strict,-Wcast-align) -Wcast-qual -Wwrite-strings \
|
||||
-Wundef
|
||||
|
||||
ifeq ($(CONFIG_WERROR),true)
|
||||
SHARED_FLAGS += -Werror
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_NATIVE),true)
|
||||
SHARED_FLAGS += -march=native
|
||||
endif
|
||||
|
||||
CFLAGS := -std=c11 $(SHARED_FLAGS) -Wmissing-prototypes
|
||||
CXXFLAGS := $(call safe_flag,-std=c++17,-std=c++14) $(SHARED_FLAGS)
|
||||
LDFLAGS := -Wl,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text
|
||||
TIDY_CHECKS := -checks=bugprone-*,-bugprone-macro-parentheses,cert-*,clang-analyzer-*,readability-*,-readability-inconsistent-declaration-parameter-name,-readability-magic-numbers,-readability-named-parameter,-bugprone-too-small-loop-variable
|
||||
ifeq ($(CONFIG_UBSAN),true)
|
||||
SHARED_FLAGS += -fsanitize=undefined -fno-sanitize-recover=undefined
|
||||
endif
|
||||
|
||||
CFLAGS := $(CFLAGS) -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes -Wstrict-prototypes
|
||||
CXXFLAGS := $(CXXFLAGS) -std=c++17 -fsized-deallocation $(SHARED_FLAGS)
|
||||
LDFLAGS := $(LDFLAGS) -Wl,-O1,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text
|
||||
|
||||
SOURCES := chacha.c h_malloc.c memory.c pages.c random.c util.c
|
||||
OBJECTS := $(SOURCES:.c=.o)
|
||||
|
||||
ifeq ($(CONFIG_CXX_ALLOCATOR),true)
|
||||
# make sure LTO is compatible in case CC and CXX don't match (such as clang and g++)
|
||||
CXX := $(CC)
|
||||
LDLIBS += -lstdc++
|
||||
|
||||
SOURCES += new.cc
|
||||
OBJECTS += new.o
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_UBSAN),true)
|
||||
CFLAGS += -fsanitize=undefined
|
||||
CXXFLAGS += -fsanitize=undefined
|
||||
endif
|
||||
OBJECTS := $(addprefix $(OUT)/,$(OBJECTS))
|
||||
|
||||
ifeq ($(CONFIG_SEAL_METADATA),true)
|
||||
CPPFLAGS += -DCONFIG_SEAL_METADATA
|
||||
ifeq (,$(filter $(CONFIG_SEAL_METADATA),true false))
|
||||
$(error CONFIG_SEAL_METADATA must be true or false)
|
||||
endif
|
||||
|
||||
ifeq (,$(filter $(CONFIG_ZERO_ON_FREE),true false))
|
||||
|
@ -82,7 +85,12 @@ ifeq (,$(filter $(CONFIG_STATS),true false))
|
|||
$(error CONFIG_STATS must be true or false)
|
||||
endif
|
||||
|
||||
ifeq (,$(filter $(CONFIG_SELF_INIT),true false))
|
||||
$(error CONFIG_SELF_INIT must be true or false)
|
||||
endif
|
||||
|
||||
CPPFLAGS += \
|
||||
-DCONFIG_SEAL_METADATA=$(CONFIG_SEAL_METADATA) \
|
||||
-DZERO_ON_FREE=$(CONFIG_ZERO_ON_FREE) \
|
||||
-DWRITE_AFTER_FREE_CHECK=$(CONFIG_WRITE_AFTER_FREE_CHECK) \
|
||||
-DSLOT_RANDOMIZE=$(CONFIG_SLOT_RANDOMIZE) \
|
||||
|
@ -99,23 +107,42 @@ CPPFLAGS += \
|
|||
-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=$(CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH) \
|
||||
-DCONFIG_CLASS_REGION_SIZE=$(CONFIG_CLASS_REGION_SIZE) \
|
||||
-DN_ARENA=$(CONFIG_N_ARENA) \
|
||||
-DCONFIG_STATS=$(CONFIG_STATS)
|
||||
-DCONFIG_STATS=$(CONFIG_STATS) \
|
||||
-DCONFIG_SELF_INIT=$(CONFIG_SELF_INIT)
|
||||
|
||||
libhardened_malloc.so: $(OBJECTS)
|
||||
$(OUT)/libhardened_malloc$(SUFFIX).so: $(OBJECTS) | $(OUT)
|
||||
$(CC) $(CFLAGS) $(LDFLAGS) -shared $^ $(LDLIBS) -o $@
|
||||
|
||||
chacha.o: chacha.c chacha.h util.h
|
||||
h_malloc.o: h_malloc.c h_malloc.h mutex.h memory.h pages.h random.h util.h
|
||||
memory.o: memory.c memory.h util.h
|
||||
new.o: new.cc h_malloc.h util.h
|
||||
pages.o: pages.c pages.h memory.h util.h
|
||||
random.o: random.c random.h chacha.h util.h
|
||||
util.o: util.c util.h
|
||||
$(OUT):
|
||||
mkdir -p $(OUT)
|
||||
|
||||
$(OUT)/chacha.o: chacha.c chacha.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/h_malloc.o: h_malloc.c include/h_malloc.h mutex.h memory.h pages.h random.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/memory.o: memory.c memory.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/new.o: new.cc include/h_malloc.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.cc) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/pages.o: pages.c pages.h memory.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/random.o: random.c random.h chacha.h util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
$(OUT)/util.o: util.c util.h $(CONFIG_FILE) | $(OUT)
|
||||
$(COMPILE.c) $(OUTPUT_OPTION) $<
|
||||
|
||||
check: tidy
|
||||
|
||||
tidy:
|
||||
clang-tidy $(TIDY_CHECKS) $(SOURCES) -- $(CPPFLAGS)
|
||||
clang-tidy --extra-arg=-std=c17 $(filter %.c,$(SOURCES)) -- $(CPPFLAGS)
|
||||
clang-tidy --extra-arg=-std=c++17 $(filter %.cc,$(SOURCES)) -- $(CPPFLAGS)
|
||||
|
||||
clean:
|
||||
rm -f libhardened_malloc.so $(OBJECTS)
|
||||
rm -f $(OUT)/libhardened_malloc.so $(OBJECTS)
|
||||
$(MAKE) -C test/ clean
|
||||
|
||||
.PHONY: clean tidy
|
||||
test: $(OUT)/libhardened_malloc$(SUFFIX).so
|
||||
$(MAKE) -C test/
|
||||
python3 -m unittest discover --start-directory test/
|
||||
|
||||
.PHONY: check clean tidy test
|
||||
|
|
726
README.md
726
README.md
|
@ -1,5 +1,30 @@
|
|||
# Hardened malloc
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Dependencies](#dependencies)
|
||||
* [Testing](#testing)
|
||||
* [Individual Applications](#individual-applications)
|
||||
* [Automated Test Framework](#automated-test-framework)
|
||||
* [Compatibility](#compatibility)
|
||||
* [OS integration](#os-integration)
|
||||
* [Android-based operating systems](#android-based-operating-systems)
|
||||
* [Traditional Linux-based operating systems](#traditional-linux-based-operating-systems)
|
||||
* [Configuration](#configuration)
|
||||
* [Core design](#core-design)
|
||||
* [Security properties](#security-properties)
|
||||
* [Randomness](#randomness)
|
||||
* [Size classes](#size-classes)
|
||||
* [Scalability](#scalability)
|
||||
* [Small (slab) allocations](#small-slab-allocations)
|
||||
* [Thread caching (or lack thereof)](#thread-caching-or-lack-thereof)
|
||||
* [Large allocations](#large-allocations)
|
||||
* [Memory tagging](#memory-tagging)
|
||||
* [API extensions](#api-extensions)
|
||||
* [Stats](#stats)
|
||||
* [System calls](#system-calls)
|
||||
|
||||
## Introduction
|
||||
|
||||
This is a security-focused general purpose memory allocator providing the
|
||||
malloc API along with various extensions. It provides substantial hardening
|
||||
against heap corruption vulnerabilities. The security-focused design also leads
|
||||
|
@ -7,7 +32,7 @@ to much less metadata overhead and memory waste from fragmentation than a more
|
|||
traditional allocator design. It aims to provide decent overall performance
|
||||
with a focus on long-term performance and memory usage rather than allocator
|
||||
micro-benchmarks. It offers scalability via a configurable number of entirely
|
||||
independently arenas, with the internal locking within arenas further divided
|
||||
independent arenas, with the internal locking within arenas further divided
|
||||
up per size class.
|
||||
|
||||
This project currently supports Bionic (Android), musl and glibc. It may
|
||||
|
@ -20,17 +45,17 @@ and can cover the same use cases.
|
|||
This allocator is intended as a successor to a previous implementation based on
|
||||
extending OpenBSD malloc with various additional security features. It's still
|
||||
heavily based on the OpenBSD malloc design, albeit not on the existing code
|
||||
other than reusing the hash table implementation for the time being. The main
|
||||
differences in the design are that it is solely focused on hardening rather
|
||||
than finding bugs, uses finer-grained size classes along with slab sizes going
|
||||
beyond 4k to reduce internal fragmentation, doesn't rely on the kernel having
|
||||
fine-grained mmap randomization and only targets 64-bit to make aggressive use
|
||||
of the large address space. There are lots of smaller differences in the
|
||||
implementation approach. It incorporates the previous extensions made to
|
||||
OpenBSD malloc including adding padding to allocations for canaries (distinct
|
||||
from the current OpenBSD malloc canaries), write-after-free detection tied to
|
||||
the existing clearing on free, queues alongside the existing randomized arrays
|
||||
for quarantining allocations and proper double-free detection for quarantined
|
||||
other than reusing the hash table implementation. The main differences in the
|
||||
design are that it's solely focused on hardening rather than finding bugs, uses
|
||||
finer-grained size classes along with slab sizes going beyond 4k to reduce
|
||||
internal fragmentation, doesn't rely on the kernel having fine-grained mmap
|
||||
randomization and only targets 64-bit to make aggressive use of the large
|
||||
address space. There are lots of smaller differences in the implementation
|
||||
approach. It incorporates the previous extensions made to OpenBSD malloc
|
||||
including adding padding to allocations for canaries (distinct from the current
|
||||
OpenBSD malloc canaries), write-after-free detection tied to the existing
|
||||
clearing on free, queues alongside the existing randomized arrays for
|
||||
quarantining allocations and proper double-free detection for quarantined
|
||||
allocations. The per-size-class memory regions with their own random bases were
|
||||
loosely inspired by the size and type-based partitioning in PartitionAlloc. The
|
||||
planned changes to OpenBSD malloc ended up being too extensive and invasive so
|
||||
|
@ -40,11 +65,14 @@ used instead as this allocator fundamentally doesn't support that environment.
|
|||
|
||||
## Dependencies
|
||||
|
||||
Debian stable determines the most ancient set of supported dependencies:
|
||||
Debian stable (currently Debian 12) determines the most ancient set of
|
||||
supported dependencies:
|
||||
|
||||
* glibc 2.24
|
||||
* Linux 4.9
|
||||
* Clang 3.8 or GCC 6.3
|
||||
* glibc 2.36
|
||||
* Linux 6.1
|
||||
* Clang 14.0.6 or GCC 12.2.0
|
||||
|
||||
For Android, the Linux GKI 5.10, 5.15 and 6.1 branches are supported.
|
||||
|
||||
However, using more recent releases is highly recommended. Older versions of
|
||||
the dependencies may be compatible at the moment but are not tested and will
|
||||
|
@ -54,18 +82,20 @@ For external malloc replacement with musl, musl 1.1.20 is required. However,
|
|||
there will be custom integration offering better performance in the future
|
||||
along with other hardening for the C standard library implementation.
|
||||
|
||||
For Android, only current generation Android Open Source Project branches will
|
||||
be supported, which currently means pie-qpr2-release.
|
||||
For Android, only the current generation, actively developed maintenance branch of the Android
|
||||
Open Source Project will be supported, which currently means `android15-release`.
|
||||
|
||||
## Testing
|
||||
|
||||
### Individual Applications
|
||||
|
||||
The `preload.sh` script can be used for testing with dynamically linked
|
||||
executables using glibc or musl:
|
||||
|
||||
./preload.sh krita --new-image RGBA,U8,500,500
|
||||
|
||||
It can be necessary to substantially increase the `vm.max_map_count` sysctl to
|
||||
accomodate the large number of mappings caused by guard slabs and large
|
||||
accommodate the large number of mappings caused by guard slabs and large
|
||||
allocation guard regions. The number of mappings can also be drastically
|
||||
reduced via a significant increase to `CONFIG_GUARD_SLABS_INTERVAL` but the
|
||||
feature has a low performance and memory usage cost so that isn't recommended.
|
||||
|
@ -78,6 +108,82 @@ this allocator offers across different size classes. The intention is that this
|
|||
will be offered as part of hardened variants of the Bionic and musl C standard
|
||||
libraries.
|
||||
|
||||
### Automated Test Framework
|
||||
|
||||
A collection of simple, automated tests are provided and can be run with the
|
||||
make command as follows:
|
||||
|
||||
make test
|
||||
|
||||
## Compatibility
|
||||
|
||||
OpenSSH 8.1 or higher is required to allow the mprotect `PROT_READ|PROT_WRITE`
|
||||
system calls in the seccomp-bpf filter rather than killing the process.
|
||||
|
||||
## OS integration
|
||||
|
||||
### Android-based operating systems
|
||||
|
||||
On GrapheneOS, hardened\_malloc is integrated into the standard C library as
|
||||
the standard malloc implementation. Other Android-based operating systems can
|
||||
reuse [the integration
|
||||
code](https://github.com/GrapheneOS/platform_bionic/commit/20160b81611d6f2acd9ab59241bebeac7cf1d71c)
|
||||
to provide it. If desired, jemalloc can be left as a runtime configuration
|
||||
option by only conditionally using hardened\_malloc to give users the choice
|
||||
between performance and security. However, this reduces security for threat
|
||||
models where persistent state is untrusted, i.e. verified boot and attestation
|
||||
(see the [attestation sister project](https://attestation.app/about)).
|
||||
|
||||
Make sure to raise `vm.max_map_count` substantially too to accommodate the very
|
||||
large number of guard pages created by hardened\_malloc. This can be done in
|
||||
`init.rc` (`system/core/rootdir/init.rc`) near the other virtual memory
|
||||
configuration:
|
||||
|
||||
write /proc/sys/vm/max_map_count 1048576
|
||||
|
||||
This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large
|
||||
value in the build configuration.
|
||||
|
||||
### Traditional Linux-based operating systems
|
||||
|
||||
On traditional Linux-based operating systems, hardened\_malloc can either be
|
||||
integrated into the libc implementation as a replacement for the standard
|
||||
malloc implementation or loaded as a dynamic library. Rather than rebuilding
|
||||
each executable to be linked against it, it can be added as a preloaded
|
||||
library to `/etc/ld.so.preload`. For example, with `libhardened_malloc.so`
|
||||
installed to `/usr/local/lib/libhardened_malloc.so`, add that full path as a
|
||||
line to the `/etc/ld.so.preload` configuration file:
|
||||
|
||||
/usr/local/lib/libhardened_malloc.so
|
||||
|
||||
The format of this configuration file is a whitespace-separated list, so it's
|
||||
good practice to put each library on a separate line.
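
As a rough sketch of enabling it globally (the commands and install destination below are illustrative assumptions, not a documented procedure; adjust paths for your distribution):

    make
    sudo install out/libhardened_malloc.so /usr/local/lib/libhardened_malloc.so
    echo /usr/local/lib/libhardened_malloc.so | sudo tee -a /etc/ld.so.preload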
|
||||
|
||||
On Debian systems `libhardened_malloc.so` should be installed into `/usr/lib/`
|
||||
to avoid preload failures caused by AppArmor profile restrictions.
|
||||
|
||||
Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis
|
||||
will not work when `AT_SECURE` is set such as with setuid binaries. It's also
|
||||
generally not a recommended approach for production usage. The recommendation
|
||||
is to enable it globally and make exceptions for performance critical cases by
|
||||
running the application in a container / namespace without it enabled.
|
||||
|
||||
Make sure to raise `vm.max_map_count` substantially too to accommodate the very
|
||||
large number of guard pages created by hardened\_malloc. As an example, in
|
||||
`/etc/sysctl.d/hardened_malloc.conf`:
|
||||
|
||||
vm.max_map_count = 1048576
|
||||
|
||||
This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large
|
||||
value in the build configuration.
|
||||
|
||||
On arm64, make sure your kernel is configured to use 4k pages since we haven't
|
||||
yet added support for 16k and 64k pages. The kernel also has to be configured
|
||||
to use 4 level page tables for the full 48 bit address space instead of only
|
||||
having a 39 bit address space for the default hardened\_malloc configuration.
|
||||
It's possible to reduce the class region size substantially to make a 39 bit
|
||||
address space workable but the defaults won't work.
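
For reference, this corresponds to building the arm64 kernel with 4k pages and the 48-bit virtual address space, along these lines (Kconfig symbol names given as a hint; verify against your kernel version):

    CONFIG_ARM64_4K_PAGES=y
    CONFIG_ARM64_VA_BITS_48=y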
|
||||
|
||||
## Configuration
|
||||
|
||||
You can set some configuration options at compile-time via arguments to the
|
||||
|
@ -90,8 +196,45 @@ between portability, performance, memory usage or security. The core design
|
|||
choices are not configurable and the allocator remains very security-focused
|
||||
even with all the optional features disabled.
|
||||
|
||||
The configuration system supports a configuration template system with two
|
||||
standard presets: the default configuration (`config/default.mk`) and a light
|
||||
configuration (`config/light.mk`). Packagers are strongly encouraged to ship
|
||||
both the standard `default` and `light` configuration. You can choose the
|
||||
configuration to build using `make VARIANT=light` where `make VARIANT=default`
|
||||
is the same as `make`. Non-default configuration templates will build a library
|
||||
with the suffix `-variant` such as `libhardened_malloc-light.so` and will use
|
||||
an `out-variant` directory instead of `out` for the build.
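
For example, a packager shipping both presets would build each variant in turn; the output paths follow directly from the rules above:

    make                # default config -> out/libhardened_malloc.so
    make VARIANT=light  # light config   -> out-light/libhardened_malloc-light.so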
|
||||
|
||||
The `default` configuration template has all normal optional security features
|
||||
enabled (just not the niche `CONFIG_SEAL_METADATA`) and is quite aggressive in
|
||||
terms of sacrificing performance and memory usage for security. The `light`
|
||||
configuration template disables the slab quarantines, write after free check,
|
||||
slot randomization and raises the guard slab interval from 1 to 8 but leaves
|
||||
zero-on-free and slab canaries enabled. The `light` configuration has solid
|
||||
performance and memory usage while still being far more secure than mainstream
|
||||
allocators with much better security properties. Disabling zero-on-free would
|
||||
gain more performance but doesn't make much difference for small allocations
|
||||
without also disabling slab canaries. Slab canaries slightly raise memory use
|
||||
and slightly slow down performance but are quite important to mitigate small
|
||||
overflows and C string overflows. Disabling slab canaries is not recommended
|
||||
in most cases since it would no longer be a strict upgrade over traditional
|
||||
allocators with headers on allocations and basic consistency checks for them.
|
||||
|
||||
For reduced memory usage at the expense of performance (this will also reduce
|
||||
the size of the empty slab caches and quarantines, saving a lot of memory,
|
||||
since those are currently based on the size of the largest size class):
|
||||
|
||||
make \
|
||||
N_ARENA=1 \
|
||||
CONFIG_EXTENDED_SIZE_CLASSES=false
|
||||
|
||||
The following boolean configuration options are available:
|
||||
|
||||
* `CONFIG_WERROR`: `true` (default) or `false` to control whether compiler
|
||||
warnings are treated as errors. This is highly recommended, but it can be
|
||||
disabled to avoid patching the Makefile if a compiler version not tested by
|
||||
the project is being used and has warnings. Investigating these warnings is
|
||||
still recommended and the intention is to always be free of any warnings.
|
||||
* `CONFIG_NATIVE`: `true` (default) or `false` to control whether the code is
|
||||
optimized for the detected CPU on the host. If this is disabled, setting up a
|
||||
custom `-march` higher than the baseline architecture is highly recommended
|
||||
|
@ -104,12 +247,15 @@ The following boolean configuration options are available:
|
|||
allocations are zeroed on free, to mitigate use-after-free and uninitialized
|
||||
use vulnerabilities along with purging lots of potentially sensitive data
|
||||
from the process as soon as possible. This has a performance cost scaling to
|
||||
the size of the allocation, which is usually acceptable.
|
||||
the size of the allocation, which is usually acceptable. This is not relevant
|
||||
to large allocations because the pages are given back to the kernel.
|
||||
* `CONFIG_WRITE_AFTER_FREE_CHECK`: `true` (default) or `false` to control
|
||||
sanity checking that new allocations contain zeroed memory. This can detect
|
||||
writes caused by a write-after-free vulnerability and mixes well with the
|
||||
features for making memory reuse randomized / delayed. This has a performance
|
||||
cost scaling to the size of the allocation, which is usually acceptable.
|
||||
sanity checking that new small allocations contain zeroed memory. This can
|
||||
detect writes caused by a write-after-free vulnerability and mixes well with
|
||||
the features for making memory reuse randomized / delayed. This has a
|
||||
performance cost scaling to the size of the allocation, which is usually
|
||||
acceptable. This is not relevant to large allocations because they're always
|
||||
a fresh memory mapping from the kernel.
|
||||
* `CONFIG_SLOT_RANDOMIZE`: `true` (default) or `false` to randomize selection
|
||||
of free slots within slabs. This has a measurable performance cost and isn't
|
||||
one of the important security features, but the cost has been deemed more
|
||||
|
@ -126,84 +272,112 @@ The following boolean configuration options are available:
|
|||
* `CONFIG_SEAL_METADATA`: `true` or `false` (default) to control whether Memory
|
||||
Protection Keys are used to disable access to all writable allocator state
|
||||
outside of the memory allocator code. It's currently disabled by default due
|
||||
to being extremely experimental and a significant performance cost for this
|
||||
use case on current generation hardware, which may become drastically lower
|
||||
in the future. Whether or not this feature is enabled, the metadata is all
|
||||
contained within an isolated memory region with high entropy random guard
|
||||
regions around it.
|
||||
to a significant performance cost for this use case on current generation
|
||||
hardware, which may become drastically lower in the future. Whether or not
|
||||
this feature is enabled, the metadata is all contained within an isolated
|
||||
memory region with high entropy random guard regions around it.
|
||||
|
||||
The following integer configuration options are available. Proper sanity checks
|
||||
for the chosen values are not written yet, so use them at your own peril:
|
||||
The following integer configuration options are available:
|
||||
|
||||
* `CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH`: `1` (default) to control the number
|
||||
of slots in the random array used to randomize reuse for small memory
|
||||
allocations. This sets the length for the largest size class (currently
|
||||
16384) and the quarantine length for smaller size classes is scaled to match
|
||||
the total memory of the quarantined allocations (1 becomes 1024 for 16 byte
|
||||
allocations).
|
||||
allocations. This sets the length for the largest size class (either 16kiB
|
||||
or 128kiB based on `CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length
|
||||
for smaller size classes is scaled to match the total memory of the
|
||||
quarantined allocations (1 becomes 1024 for 16 byte allocations with 16kiB
|
||||
as the largest size class, or 8192 with 128kiB as the largest).
|
||||
* `CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH`: `1` (default) to control the number of
|
||||
slots in the queue used to delay reuse for small memory allocations. This
|
||||
sets the length for the largest size class (currently 16384) and the
|
||||
quarantine length for smaller size classes is scaled to match the total
|
||||
memory of the quarantined allocations (1 becomes 1024 for 16 byte
|
||||
allocations).
|
||||
sets the length for the largest size class (either 16kiB or 128kiB based on
|
||||
`CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length for smaller size
|
||||
classes is scaled to match the total memory of the quarantined allocations (1
|
||||
becomes 1024 for 16 byte allocations with 16kiB as the largest size class, or
|
||||
8192 with 128kiB as the largest).
|
||||
* `CONFIG_GUARD_SLABS_INTERVAL`: `1` (default) to control the number of slabs
|
||||
before a slab is skipped and left as an unused memory protected guard slab
|
||||
before a slab is skipped and left as an unused memory protected guard slab.
|
||||
The default of `1` leaves a guard slab between every slab. This feature does
|
||||
not have a *direct* performance cost, but it makes the address space usage
|
||||
sparser which can indirectly hurt performance. The kernel also needs to track
|
||||
a lot more memory mappings, which uses a bit of extra memory and slows down
|
||||
memory mapping and memory protection changes in the process. The kernel uses
|
||||
O(log n) algorithms for this and system calls are already fairly slow anyway,
|
||||
so having many extra mappings doesn't usually add up to a significant cost.
|
||||
* `CONFIG_GUARD_SIZE_DIVISOR`: `2` (default) to control the maximum size of the
|
||||
guard regions placed on both sides of large memory allocations, relative to
|
||||
the usable size of the memory allocation
|
||||
* `CONFIG_REGION_QUARANTINE_RANDOM_LENGTH`: `128` (default) to control the
|
||||
the usable size of the memory allocation.
|
||||
* `CONFIG_REGION_QUARANTINE_RANDOM_LENGTH`: `256` (default) to control the
|
||||
number of slots in the random array used to randomize region reuse for large
|
||||
memory allocations
|
||||
memory allocations.
|
||||
* `CONFIG_REGION_QUARANTINE_QUEUE_LENGTH`: `1024` (default) to control the
|
||||
number of slots in the queue used to delay region reuse for large memory
|
||||
allocations
|
||||
allocations.
|
||||
* `CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD`: `33554432` (default) to control
|
||||
the size threshold where large allocations will not be quarantined
|
||||
the size threshold where large allocations will not be quarantined.
|
||||
* `CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH`: `32` (default) to control the
|
||||
number of slots in the random array used to randomize free slab reuse
|
||||
number of slots in the random array used to randomize free slab reuse.
|
||||
* `CONFIG_CLASS_REGION_SIZE`: `34359738368` (default) to control the size of
|
||||
the size class regions
|
||||
* `CONFIG_N_ARENA`: `1` (default) to control the number of arenas
|
||||
the size class regions.
|
||||
* `CONFIG_N_ARENA`: `4` (default) to control the number of arenas
|
||||
* `CONFIG_STATS`: `false` (default) to control whether stats on allocation /
|
||||
deallocation count and active allocations are tracked. This is currently only
|
||||
exposed via the mallinfo APIs on Android.
|
||||
deallocation count and active allocations are tracked. See the [section on
|
||||
stats](#stats) for more details.
|
||||
* `CONFIG_EXTENDED_SIZE_CLASSES`: `true` (default) to control whether small
|
||||
size class go up to 64k instead of the minimum requirement for avoiding
|
||||
memory waste of 16k. The option to extend it even further will be offered in
|
||||
the future when better support for larger slab allocations is added.
|
||||
size class go up to 128kiB instead of the minimum requirement for avoiding
|
||||
memory waste of 16kiB. The option to extend it even further will be offered
|
||||
in the future when better support for larger slab allocations is added. See
|
||||
the [section on size classes](#size-classes) below for details.
|
||||
* `CONFIG_LARGE_SIZE_CLASSES`: `true` (default) to control whether large
|
||||
allocations use the slab allocation size class scheme instead of page size
|
||||
granularity (see the section on size classes below)
|
||||
granularity. See the [section on size classes](#size-classes) below for
|
||||
details.
|
||||
|
||||
There will be more control over enabled features in the future along with
|
||||
control over fairly arbitrarily chosen values like the size of empty slab
|
||||
caches (making them smaller improves security and reduces memory usage while
|
||||
larger caches can substantially improves performance).
|
||||
|
||||
## Core design

The core design of the allocator is very simple / minimalist. The allocator is
exclusive to 64-bit platforms in order to take full advantage of the abundant
address space without being constrained by needing to keep the design
compatible with 32-bit.

The mutable allocator state is entirely located within a dedicated metadata
region, and the allocator is designed around this approach for both small
(slab) allocations and large allocations. This provides reliable, deterministic
protections against invalid free including double frees, and protects metadata
from attackers. Traditional allocator exploitation techniques do not work with
the hardened\_malloc implementation.

Small allocations are always located in a large memory region reserved for slab
allocations. On free, it can be determined that an allocation is one of the
small size classes from the address range. If arenas are enabled, the arena is
also determined from the address range as each arena has a dedicated sub-region
in the slab allocation region. Arenas provide totally independent slab
allocators with their own allocator state and no coordination between them.
Once the base region is determined (simply the slab allocation region as a
whole without any arenas enabled), the size class is determined from the
address range too, since it's divided up into a sub-region for each size class.
There's a top level slab allocation region, divided up into arenas, with each
of those divided up into size class regions. The size class regions each have a
random base within a large guard region. Once the size class is determined, the
slab size is known, and the index of the slab is calculated and used to obtain
the slab metadata for the slab from the slab metadata array. Finally, the index
of the slot within the slab provides the index of the bit tracking the slot in
the bitmap. Every slab allocation slot has a dedicated bit in a bitmap tracking
whether it's free, along with a separate bitmap for tracking allocations in the
quarantine. The slab metadata entries in the array have intrusive lists
threaded through them to track partial slabs (partially filled, and these are
the first choice for allocation), empty slabs (limited amount of cached free
memory) and free slabs (purged / memory protected).
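
As a rough illustration of that lookup path, here is a simplified sketch. It is
not the actual hardened\_malloc code: real size class regions have random bases
inside large guard regions, the layout below is flattened, and every name and
constant is an assumption made for the example:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// illustrative per-size-class constants; see the size class table below
static const size_t class_sizes[] = {16, 32, 48, 64};
static const size_t slab_sizes[]  = {4096, 4096, 4096, 4096};
#define N_CLASSES (sizeof(class_sizes) / sizeof(class_sizes[0]))

// hypothetical globals describing the statically reserved layout
static uintptr_t slab_region_start; // base of the whole slab allocation region
static size_t class_region_size;    // bytes reserved per size class sub-region

static bool slab_slot_lookup(const void *p, size_t *class_index,
                             size_t *slab_index, size_t *slot_index) {
    uintptr_t addr = (uintptr_t)p;
    uintptr_t end = slab_region_start + N_CLASSES * class_region_size;
    if (addr < slab_region_start || addr >= end) {
        return false; // not a slab allocation, so it must be a large allocation
    }
    size_t class = (addr - slab_region_start) / class_region_size;
    uintptr_t offset = (addr - slab_region_start) % class_region_size;
    *class_index = class;
    *slab_index = offset / slab_sizes[class]; // selects the slab metadata entry
    *slot_index = (offset % slab_sizes[class]) / class_sizes[class]; // selects the bitmap bit
    return true;
}
```
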
Large allocations are tracked via a global hash table mapping their address to
their size and random guard size. They're simply memory mappings and get mapped
on allocation and then unmapped on free. Large allocations are the only dynamic
memory mappings made by the allocator, since the address space for allocator
state (including both small / large allocation metadata) and slab allocations
is statically reserved.
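
A minimal sketch of the information such a hash table entry has to carry (the
struct and field names are illustrative assumptions, not the real metadata
layout):

```c
#include <stddef.h>

// one entry in the global hash table, keyed by the allocation address
struct large_allocation {
    void *address;     // start of the usable mapping returned to the caller
    size_t size;       // usable size of the mapping
    size_t guard_size; // randomly sized PROT_NONE guards on both sides
};
```
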
This allocator is aimed at production usage, not aiding with finding and fixing
memory corruption bugs for software development. It does find many latent bugs

@ -268,6 +442,7 @@ was a bit less important and if a core goal was finding latent bugs.
|
|||
* Slab allocations are zeroed on free
* Detection of write-after-free for slab allocations by verifying zero filling
  is intact at allocation time
* Delayed free via a combination of FIFO and randomization for slab allocations
* Large allocations are purged and memory protected on free with the memory
  mapping kept reserved in a quarantine to detect use-after-free
* The quarantine is primarily based on a FIFO ring buffer, with the oldest
@ -278,7 +453,6 @@ was a bit less important and if a core goal was finding latent bugs.
|
|||
  of the quarantine
* Memory in fresh allocations is consistently zeroed due to it either being
  fresh pages or zeroed on free after previous usage
* Random canaries placed after each slab allocation to *absorb*
  and then later detect overflows/underflows
* High entropy per-slab random values
@ -287,8 +461,9 @@ was a bit less important and if a core goal was finding latent bugs.
|
|||
  size class regions interspersed with guard pages
* Zero size allocations are a dedicated size class with the entire region
  remaining non-readable and non-writable
* Extension for retrieving the size of allocations with fallback to a sentinel
  for pointers not managed by the allocator [in-progress, full implementation
  needs to be ported from the previous OpenBSD malloc-based allocator]
    * Can also return accurate values for pointers *within* small allocations
    * The same applies to pointers within the first page of large allocations,
      otherwise it currently has to return a sentinel
@ -298,38 +473,39 @@ was a bit less important and if a core goal was finding latent bugs.
|
|||
* Errors other than ENOMEM from mmap, munmap, mprotect and mremap treated
  as fatal, which can help to detect memory management gone wrong elsewhere
  in the process.
* Memory tagging for slab allocations via MTE on ARMv8.5+
    * random memory tags as the baseline, providing probabilistic protection
      against various forms of memory corruption
    * dedicated tag for free slots, set on free, for deterministic protection
      against accessing freed memory
    * store previous random tag within freed slab allocations, and increment it
      to get the next tag for that slot to provide deterministic use-after-free
      detection through multiple cycles of memory reuse
    * guarantee distinct tags for adjacent memory allocations by incrementing
      past matching values for deterministic detection of linear overflows

## Randomness

The current implementation of random number generation for randomization-based
mitigations is based on generating a keystream from a stream cipher (ChaCha8)
in small chunks. Separate CSPRNGs are used for each small size class in each
arena, large allocations and initialization in order to fit into the
fine-grained locking model without needing to waste memory per thread by
having the CSPRNG state in Thread Local Storage. Similarly, it's protected via
the same approach taken for the rest of the metadata. The stream cipher is
regularly reseeded from the OS to provide backtracking and prediction
resistance with a negligible cost. The reseed interval simply needs to be
adjusted to the point that it stops registering as having any significant
performance impact. The performance impact on recent Linux kernels is
primarily from the high cost of system calls and locking since the
implementation is quite efficient (ChaCha20), especially for just generating
the key and nonce for another stream cipher (ChaCha8).

ChaCha8 is a great fit because it's extremely fast across platforms without
relying on hardware support or complex platform-specific code. The security
margins of ChaCha20 would be completely overkill for the use case. Using
ChaCha8 avoids needing to resort to a non-cryptographically secure PRNG or
something without a lot of scrutiny. The current implementation is simply the
reference implementation of ChaCha8 converted into a pure keystream by ripping
out the XOR of the message into the keystream.
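
A rough sketch of that structure is shown below. The `chacha8_keystream`
function stands in for the converted reference implementation, the chunk and
reseed sizes are made-up placeholders rather than the real tuning, and error
handling for `getrandom` is omitted:

```c
#include <stdint.h>
#include <string.h>
#include <sys/random.h>

#define KEY_SIZE 32
#define NONCE_SIZE 8
#define CHUNK_SIZE 256               // small keystream chunk kept per CSPRNG
#define RESEED_INTERVAL (256 * 1024) // bytes generated before reseeding from the OS

// hypothetical stand-in for the ChaCha8 reference implementation converted
// into a pure keystream generator
void chacha8_keystream(const unsigned char key[KEY_SIZE],
                       const unsigned char nonce[NONCE_SIZE],
                       uint64_t block_counter, unsigned char *out, size_t len);

struct csprng {
    unsigned char key[KEY_SIZE];
    unsigned char nonce[NONCE_SIZE];
    unsigned char cache[CHUNK_SIZE];
    size_t cache_used;
    uint64_t counter;
    size_t generated_since_reseed;
};

// call once before first use and whenever the reseed interval is reached
static void csprng_reseed(struct csprng *r) {
    // reseeding from the OS provides backtracking and prediction resistance
    (void)getrandom(r->key, sizeof(r->key), 0);
    (void)getrandom(r->nonce, sizeof(r->nonce), 0);
    r->cache_used = sizeof(r->cache); // force regenerating the keystream chunk
    r->counter = 0;
    r->generated_since_reseed = 0;
}

static unsigned char csprng_byte(struct csprng *r) {
    if (r->generated_since_reseed >= RESEED_INTERVAL) {
        csprng_reseed(r);
    }
    if (r->cache_used == sizeof(r->cache)) {
        chacha8_keystream(r->key, r->nonce, r->counter++, r->cache, sizeof(r->cache));
        r->cache_used = 0;
    }
    r->generated_since_reseed++;
    return r->cache[r->cache_used++];
}
```
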
@ -367,41 +543,41 @@ preliminary set of values.
|
|||
| size class | worst case internal fragmentation | slab slots | slab size | internal fragmentation for slabs |
| - | - | - | - | - |
| 16 | 93.75% | 256 | 4096 | 0.0% |
| 32 | 46.88% | 128 | 4096 | 0.0% |
| 48 | 31.25% | 85 | 4096 | 0.390625% |
| 64 | 23.44% | 64 | 4096 | 0.0% |
| 80 | 18.75% | 51 | 4096 | 0.390625% |
| 96 | 15.62% | 42 | 4096 | 1.5625% |
| 112 | 13.39% | 36 | 4096 | 1.5625% |
| 128 | 11.72% | 64 | 8192 | 0.0% |
| 160 | 19.38% | 51 | 8192 | 0.390625% |
| 192 | 16.15% | 64 | 12288 | 0.0% |
| 224 | 13.84% | 54 | 12288 | 1.5625% |
| 256 | 12.11% | 64 | 16384 | 0.0% |
| 320 | 19.69% | 64 | 20480 | 0.0% |
| 384 | 16.41% | 64 | 24576 | 0.0% |
| 448 | 14.06% | 64 | 28672 | 0.0% |
| 512 | 12.3% | 64 | 32768 | 0.0% |
| 640 | 19.84% | 64 | 40960 | 0.0% |
| 768 | 16.54% | 64 | 49152 | 0.0% |
| 896 | 14.17% | 64 | 57344 | 0.0% |
| 1024 | 12.4% | 64 | 65536 | 0.0% |
| 1280 | 19.92% | 16 | 20480 | 0.0% |
| 1536 | 16.6% | 16 | 24576 | 0.0% |
| 1792 | 14.23% | 16 | 28672 | 0.0% |
| 2048 | 12.45% | 16 | 32768 | 0.0% |
| 2560 | 19.96% | 8 | 20480 | 0.0% |
| 3072 | 16.63% | 8 | 24576 | 0.0% |
| 3584 | 14.26% | 8 | 28672 | 0.0% |
| 4096 | 12.48% | 8 | 32768 | 0.0% |
| 5120 | 19.98% | 8 | 40960 | 0.0% |
| 6144 | 16.65% | 8 | 49152 | 0.0% |
| 7168 | 14.27% | 8 | 57344 | 0.0% |
| 8192 | 12.49% | 8 | 65536 | 0.0% |
| 10240 | 19.99% | 6 | 61440 | 0.0% |
| 12288 | 16.66% | 5 | 61440 | 0.0% |
| 14336 | 14.28% | 4 | 57344 | 0.0% |
| 16384 | 12.49% | 4 | 65536 | 0.0% |
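
The worst case column can be reproduced from the spacing alone: the worst fit
is a request one byte larger than the previous size class. A small sketch of
both calculations, using values copied from the rows above:

```c
#include <stdio.h>

// worst case internal fragmentation within an allocation: requesting one byte
// more than the previous size class still returns this size class
static double worst_case_fragmentation(double size, double previous_size) {
    return (size - (previous_size + 1)) / size * 100.0;
}

// internal fragmentation for slabs: unused space left over in the slab itself
static double slab_fragmentation(double size, double slots, double slab_size) {
    return (1.0 - (size * slots) / slab_size) * 100.0;
}

int main(void) {
    printf("%.2f%%\n", worst_case_fragmentation(16384, 14336)); // 12.49%
    printf("%.6f%%\n", slab_fragmentation(48, 85, 4096));       // 0.390625%
    return 0;
}
```
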
The slab allocation size classes end at 16384 since that's the final size for
2048 byte spacing and the next spacing class matches the page size of 4096

@ -423,18 +599,18 @@ retaining the isolation.
|
|||
|
||||
| size class | worst case internal fragmentation | slab slots | slab size | internal fragmentation for slabs |
| - | - | - | - | - |
| 20480 | 20.0% | 1 | 20480 | 0.0% |
| 24576 | 16.66% | 1 | 24576 | 0.0% |
| 28672 | 14.28% | 1 | 28672 | 0.0% |
| 32768 | 12.5% | 1 | 32768 | 0.0% |
| 40960 | 20.0% | 1 | 40960 | 0.0% |
| 49152 | 16.66% | 1 | 49152 | 0.0% |
| 57344 | 14.28% | 1 | 57344 | 0.0% |
| 65536 | 12.5% | 1 | 65536 | 0.0% |
| 81920 | 20.0% | 1 | 81920 | 0.0% |
| 98304 | 16.67% | 1 | 98304 | 0.0% |
| 114688 | 14.28% | 1 | 114688 | 0.0% |
| 131072 | 12.5% | 1 | 131072 | 0.0% |

The `CONFIG_LARGE_SIZE_CLASSES` option controls whether large allocations use
the same size class scheme providing 4 size classes for every doubling of size.

|
@ -461,7 +637,7 @@ to finding the per-size-class metadata. The part that's still open to different
|
|||
design choices is how arenas are assigned to threads. One approach is
statically assigning arenas via round-robin like the standard jemalloc
implementation, or statically assigning to a random arena which is essentially
the current implementation. Another option is dynamic load balancing via a
heuristic like `sched_getcpu` for per-CPU arenas, which would offer better
performance than randomly choosing an arena each time while being more
predictable for an attacker. There are actually some security benefits from
@ -472,7 +648,7 @@ varying usage of size classes.
|
|||
When there's substantial allocation or deallocation pressure, the allocator
does end up calling into the kernel to purge / protect unused slabs by
replacing them with fresh `PROT_NONE` regions along with unprotecting slabs
when partially filled and cached empty slabs are depleted. There will be
configuration over the amount of cached empty slabs, but it's not entirely a
performance vs. memory trade-off since memory protecting unused slabs is a nice
opportunistic boost to security. However, it's not really part of the core
@ -548,77 +724,46 @@ freeing as there would be if the kernel supported these features directly.
|
|||
|
||||
## Memory tagging

Random tags are set for all slab allocations when allocated, with 4 excluded values:

1. the reserved `0` tag
2. the previous tag used for the slot
3. the current (or previous) tag used for the slot to the left
4. the current (or previous) tag used for the slot to the right

When a slab allocation is freed, the reserved `0` tag is set for the slot.
Slab allocation slots are cleared before reuse when memory tagging is enabled.

This ensures the following properties:

- Linear overflows are deterministically detected.
- Use-after-free is deterministically detected until the freed slot goes through
  both the random and FIFO quarantines, gets allocated again, goes through both
  quarantines again and then finally gets allocated again for a 2nd time.
- Since the default `0` tag is reserved, untagged pointers can't access slab
  allocations and vice versa.

Slab allocations are done in a statically reserved region for each size class
and all metadata is in a statically reserved region, so interactions between
different uses of the same address space are not applicable.

Large allocations beyond the largest slab allocation size class (128k by
default) are guaranteed to have randomly sized guard regions to the left and
right. Random and FIFO address space quarantines provide use-after-free
detection. We need to test whether the cost of random tags is acceptable to
enable them by default, since they would be useful for:

- probabilistic detection of overflows
- probabilistic detection of use-after-free once the address space is
  out of the quarantine and reused for another allocation
- deterministic detection of use-after-free for reuse by another allocator

When memory tagging is enabled, checking for write-after-free at allocation
time and checking canaries are both disabled. Canaries will be more thoroughly
disabled when using memory tagging in the future, but Android currently has
[very dynamic memory tagging support](https://source.android.com/docs/security/test/memory-safety/arm-mte)
where it can be disabled at any time which creates a barrier to optimizing
by disabling redundant features.
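
A sketch of how a tag satisfying those exclusions can be chosen is shown below.
It is illustrative only: the real implementation relies on the hardware tagging
instructions with an exclusion mask (see `arm_mte.h` further down), and the
helper names here are assumptions:

```c
#include <stdint.h>

// hypothetical helper returning a random value in [0, bound)
uint8_t get_random_tag_candidate(uint8_t bound);

// Pick a tag in [1, 15] that differs from the slot's previous tag and from the
// tags currently used by the slots to the left and right. Tag 0 stays reserved
// for freed slots and untagged pointers.
static uint8_t choose_slot_tag(uint8_t previous, uint8_t left, uint8_t right) {
    for (;;) {
        uint8_t tag = (uint8_t)(1 + get_random_tag_candidate(15)); // 1..15
        if (tag != previous && tag != left && tag != right) {
            return tag;
        }
    }
}
```
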
## API extensions
|
||||
|
||||
|
@ -647,6 +792,183 @@ this implementation, it retrieves an upper bound on the size for small memory
|
|||
allocations based on calculating the size class region. This function is safe
|
||||
to use from signal handlers already.
|
||||
|
||||
## Stats
|
||||
|
||||
If stats are enabled, hardened\_malloc keeps track of allocator statistics in
order to provide implementations of `mallinfo` and `malloc_info`.
|
||||
|
||||
On Android, `mallinfo` is used for [mallinfo-based garbage collection
|
||||
triggering](https://developer.android.com/preview/features#mallinfo) so
|
||||
hardened\_malloc enables `CONFIG_STATS` by default. The `malloc_info`
|
||||
implementation on Android is the standard one in Bionic, with the information
|
||||
provided to Bionic via Android's internal extended `mallinfo` API with support
|
||||
for arenas and size class bins. This means the `malloc_info` output is fully
|
||||
compatible, including still having `jemalloc-1` as the version of the data
|
||||
format to retain compatibility with existing tooling.
|
||||
|
||||
On non-Android Linux, `mallinfo` has zeroed fields even with `CONFIG_STATS`
|
||||
enabled because glibc `mallinfo` is inherently broken. It defines the fields as
|
||||
`int` instead of `size_t`, resulting in undefined signed overflows. It also
|
||||
misuses the fields and provides a strange, idiosyncratic set of values rather
|
||||
than following the SVID/XPG `mallinfo` definition. The `malloc_info` function
|
||||
is still provided, with a similar format as what Android uses, with tweaks for
|
||||
hardened\_malloc and the version set to `hardened_malloc-1`. The data format
|
||||
may be changed in the future.
|
||||
|
||||
As an example, consider the following program from the hardened\_malloc tests:
|
||||
|
||||
```c
|
||||
#include <pthread.h>
|
||||
|
||||
#include <malloc.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
void leak_memory(void) {
|
||||
(void)malloc(1024 * 1024 * 1024);
|
||||
(void)malloc(16);
|
||||
(void)malloc(32);
|
||||
(void)malloc(4096);
|
||||
}
|
||||
|
||||
void *do_work(void *p) {
|
||||
leak_memory();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main(void) {
|
||||
pthread_t thread[4];
|
||||
for (int i = 0; i < 4; i++) {
|
||||
pthread_create(&thread[i], NULL, do_work, NULL);
|
||||
}
|
||||
for (int i = 0; i < 4; i++) {
|
||||
pthread_join(thread[i], NULL);
|
||||
}
|
||||
|
||||
malloc_info(0, stdout);
|
||||
}
|
||||
```
|
||||
|
||||
This produces the following output when piped through `xmllint --format -`:
|
||||
|
||||
```xml
|
||||
<?xml version="1.0"?>
|
||||
<malloc version="hardened_malloc-1">
|
||||
<heap nr="0">
|
||||
<bin nr="2" size="32">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>32</allocated>
|
||||
</bin>
|
||||
<bin nr="3" size="48">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>48</allocated>
|
||||
</bin>
|
||||
<bin nr="13" size="320">
|
||||
<nmalloc>4</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>20480</slab_allocated>
|
||||
<allocated>1280</allocated>
|
||||
</bin>
|
||||
<bin nr="29" size="5120">
|
||||
<nmalloc>2</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>40960</slab_allocated>
|
||||
<allocated>10240</allocated>
|
||||
</bin>
|
||||
<bin nr="45" size="81920">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>81920</slab_allocated>
|
||||
<allocated>81920</allocated>
|
||||
</bin>
|
||||
</heap>
|
||||
<heap nr="1">
|
||||
<bin nr="2" size="32">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>32</allocated>
|
||||
</bin>
|
||||
<bin nr="3" size="48">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>48</allocated>
|
||||
</bin>
|
||||
<bin nr="29" size="5120">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>40960</slab_allocated>
|
||||
<allocated>5120</allocated>
|
||||
</bin>
|
||||
</heap>
|
||||
<heap nr="2">
|
||||
<bin nr="2" size="32">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>32</allocated>
|
||||
</bin>
|
||||
<bin nr="3" size="48">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>48</allocated>
|
||||
</bin>
|
||||
<bin nr="29" size="5120">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>40960</slab_allocated>
|
||||
<allocated>5120</allocated>
|
||||
</bin>
|
||||
</heap>
|
||||
<heap nr="3">
|
||||
<bin nr="2" size="32">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>32</allocated>
|
||||
</bin>
|
||||
<bin nr="3" size="48">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>4096</slab_allocated>
|
||||
<allocated>48</allocated>
|
||||
</bin>
|
||||
<bin nr="29" size="5120">
|
||||
<nmalloc>1</nmalloc>
|
||||
<ndalloc>0</ndalloc>
|
||||
<slab_allocated>40960</slab_allocated>
|
||||
<allocated>5120</allocated>
|
||||
</bin>
|
||||
</heap>
|
||||
<heap nr="4">
|
||||
<allocated_large>4294967296</allocated_large>
|
||||
</heap>
|
||||
</malloc>
|
||||
```
|
||||
|
||||
The heap entries correspond to the arenas. Unlike jemalloc, hardened\_malloc
|
||||
doesn't handle large allocations within the arenas, so it presents those in the
|
||||
`malloc_info` statistics as a separate arena dedicated to large allocations.
|
||||
For example, with 4 arenas enabled, there will be a 5th arena in the statistics
|
||||
for the large allocations.
|
||||
|
||||
The `nmalloc` / `ndalloc` fields are 64-bit integers tracking allocation and
|
||||
deallocation count. These are defined as wrapping on overflow, per the jemalloc
|
||||
implementation.
|
||||
|
||||
See the [section on size classes](#size-classes) to map the size class bin
number to the corresponding size class. The bin index begins at 0, mapping to
the 0 byte size class, followed by 1 for 16 bytes, 2 for 32 bytes, etc. and
large allocations are treated as one group.
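
As a reference for that mapping, here is a small sketch with the sizes copied
from the size class tables above (the helper name is made up):

```c
#include <stddef.h>

static const size_t bin_sizes[] = {
    0, 16, 32, 48, 64, 80, 96, 112, 128,
    160, 192, 224, 256, 320, 384, 448, 512,
    640, 768, 896, 1024, 1280, 1536, 1792, 2048,
    2560, 3072, 3584, 4096, 5120, 6144, 7168, 8192,
    10240, 12288, 14336, 16384, 20480, 24576, 28672, 32768,
    40960, 49152, 57344, 65536, 81920, 98304, 114688, 131072,
};

// e.g. bin 13 -> 320 and bin 45 -> 81920, matching the sample output above
static size_t bin_to_size(size_t bin) {
    return bin_sizes[bin];
}
```
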
|
||||
When stats aren't enabled, the `malloc_info` output will be an empty `malloc`
|
||||
element.
|
||||
|
||||
## System calls
|
||||
|
||||
This is intended to aid with creating system call whitelists via seccomp-bpf
|
||||
|
@ -665,6 +987,7 @@ System calls used by all build configurations:
|
|||
* `mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new)`
|
||||
* `munmap`
|
||||
* `write(STDERR_FILENO, buf, len)` (before aborting due to memory corruption)
|
||||
* `madvise(ptr, size, MADV_DONTNEED)`
|
||||
|
||||
The main distinction from a typical malloc implementation is the use of
|
||||
getrandom. A common compatibility issue is that existing system call whitelists
|
||||
|
@ -677,7 +1000,6 @@ Additional system calls when `CONFIG_SEAL_METADATA=true` is set:
|
|||
* `pkey_alloc`
|
||||
* `pkey_mprotect` instead of `mprotect` with an additional `pkey` parameter,
|
||||
but otherwise the same (regular `mprotect` is never called)
|
||||
* `uname` (to detect old buggy kernel versions)
|
||||
|
||||
Additional system calls for Android builds with `LABEL_MEMORY`:
|
||||
|
||||
|
|
androidtest/Android.bp — new file (25 lines)
|
@ -0,0 +1,25 @@
|
|||
java_test_host {
|
||||
name: "HMallocTest",
|
||||
srcs: [
|
||||
"src/**/*.java",
|
||||
],
|
||||
|
||||
libs: [
|
||||
"tradefed",
|
||||
"compatibility-tradefed",
|
||||
"compatibility-host-util",
|
||||
],
|
||||
|
||||
static_libs: [
|
||||
"cts-host-utils",
|
||||
"frameworks-base-hostutils",
|
||||
],
|
||||
|
||||
test_suites: [
|
||||
"general-tests",
|
||||
],
|
||||
|
||||
data_device_bins_64: [
|
||||
"memtag_test",
|
||||
],
|
||||
}
|
androidtest/AndroidTest.xml — new file (13 lines)
|
@ -0,0 +1,13 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<configuration description="hardened_malloc test">
|
||||
|
||||
<target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
|
||||
<option name="cleanup" value="true" />
|
||||
<option name="push" value="memtag_test->/data/local/tmp/memtag_test" />
|
||||
</target_preparer>
|
||||
|
||||
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
|
||||
<option name="jar" value="HMallocTest.jar" />
|
||||
</test>
|
||||
|
||||
</configuration>
|
androidtest/memtag/Android.bp — new file (17 lines)
|
@ -0,0 +1,17 @@
|
|||
cc_test {
|
||||
name: "memtag_test",
|
||||
srcs: ["memtag_test.cc"],
|
||||
cflags: [
|
||||
"-Wall",
|
||||
"-Werror",
|
||||
"-Wextra",
|
||||
"-O0",
|
||||
"-march=armv9-a+memtag",
|
||||
],
|
||||
|
||||
compile_multilib: "64",
|
||||
|
||||
sanitize: {
|
||||
memtag_heap: true,
|
||||
},
|
||||
}
|
androidtest/memtag/memtag_test.cc — new file (351 lines)
|
@ -0,0 +1,351 @@
|
|||
// needed to unconditionally enable assertions
|
||||
#undef NDEBUG
|
||||
#include <assert.h>
|
||||
#include <malloc.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstring>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
|
||||
|
||||
#include "../../arm_mte.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
using u8 = uint8_t;
|
||||
using uptr = uintptr_t;
|
||||
using u64 = uint64_t;
|
||||
|
||||
const size_t DEFAULT_ALLOC_SIZE = 8;
|
||||
const size_t CANARY_SIZE = 8;
|
||||
|
||||
void do_context_switch() {
|
||||
utsname s;
|
||||
uname(&s);
|
||||
}
|
||||
|
||||
u8 get_pointer_tag(void *ptr) {
|
||||
return (((uptr) ptr) >> 56) & 0xf;
|
||||
}
|
||||
|
||||
void *untag_pointer(void *ptr) {
|
||||
const uintptr_t mask = UINTPTR_MAX >> 8;
|
||||
return (void *) ((uintptr_t) ptr & mask);
|
||||
}
|
||||
|
||||
void *set_pointer_tag(void *ptr, u8 tag) {
|
||||
return (void *) (((uintptr_t) tag << 56) | (uintptr_t) untag_pointer(ptr));
|
||||
}
|
||||
|
||||
// This test checks that a slab slot allocation uses a tag that is distinct from the tags of its
// neighbors and from the tag of the previous allocation that used the same slot
|
||||
void tag_distinctness() {
|
||||
// tag 0 is reserved
|
||||
const int min_tag = 1;
|
||||
const int max_tag = 0xf;
|
||||
|
||||
struct SizeClass {
|
||||
int size;
|
||||
int slot_cnt;
|
||||
};
|
||||
|
||||
// values from size_classes[] and size_class_slots[] in h_malloc.c
|
||||
SizeClass size_classes[] = {
|
||||
{ .size = 16, .slot_cnt = 256, },
|
||||
{ .size = 32, .slot_cnt = 128, },
|
||||
// this size class is used by allocations that are made by the addr_tag_map, which breaks
|
||||
// tag distinctness checks
|
||||
// { .size = 48, .slot_cnt = 85, },
|
||||
{ .size = 64, .slot_cnt = 64, },
|
||||
{ .size = 80, .slot_cnt = 51, },
|
||||
{ .size = 96, .slot_cnt = 42, },
|
||||
{ .size = 112, .slot_cnt = 36, },
|
||||
{ .size = 128, .slot_cnt = 64, },
|
||||
{ .size = 160, .slot_cnt = 51, },
|
||||
{ .size = 192, .slot_cnt = 64, },
|
||||
{ .size = 224, .slot_cnt = 54, },
|
||||
{ .size = 10240, .slot_cnt = 6, },
|
||||
{ .size = 20480, .slot_cnt = 1, },
|
||||
};
|
||||
|
||||
int tag_usage[max_tag + 1] = {};
|
||||
|
||||
for (size_t sc_idx = 0; sc_idx < sizeof(size_classes) / sizeof(SizeClass); ++sc_idx) {
|
||||
SizeClass &sc = size_classes[sc_idx];
|
||||
|
||||
const size_t full_alloc_size = sc.size;
|
||||
const size_t alloc_size = full_alloc_size - CANARY_SIZE;
|
||||
|
||||
// "tdc" is short for "tag distinctness check"
|
||||
int left_neighbor_tdc_cnt = 0;
|
||||
int right_neighbor_tdc_cnt = 0;
|
||||
int prev_alloc_tdc_cnt = 0;
|
||||
|
||||
int iter_cnt = 600;
|
||||
|
||||
unordered_map<uptr, u8> addr_tag_map;
|
||||
addr_tag_map.reserve(iter_cnt * sc.slot_cnt);
|
||||
|
||||
u64 seen_tags = 0;
|
||||
|
||||
for (int iter = 0; iter < iter_cnt; ++iter) {
|
||||
uptr allocations[256]; // 256 is max slot count
|
||||
|
||||
for (int i = 0; i < sc.slot_cnt; ++i) {
|
||||
u8 *p = (u8 *) malloc(alloc_size);
|
||||
assert(p);
|
||||
uptr addr = (uptr) untag_pointer(p);
|
||||
u8 tag = get_pointer_tag(p);
|
||||
|
||||
assert(tag >= min_tag && tag <= max_tag);
|
||||
seen_tags |= 1 << tag;
|
||||
++tag_usage[tag];
|
||||
|
||||
// check most recent tags of left and right neighbors
|
||||
|
||||
auto left = addr_tag_map.find(addr - full_alloc_size);
|
||||
if (left != addr_tag_map.end()) {
|
||||
assert(left->second != tag);
|
||||
++left_neighbor_tdc_cnt;
|
||||
}
|
||||
|
||||
auto right = addr_tag_map.find(addr + full_alloc_size);
|
||||
if (right != addr_tag_map.end()) {
|
||||
assert(right->second != tag);
|
||||
++right_neighbor_tdc_cnt;
|
||||
}
|
||||
|
||||
// check previous tag of this slot
|
||||
auto prev = addr_tag_map.find(addr);
|
||||
if (prev != addr_tag_map.end()) {
|
||||
assert(prev->second != tag);
|
||||
++prev_alloc_tdc_cnt;
|
||||
addr_tag_map.erase(addr);
|
||||
}
|
||||
|
||||
addr_tag_map.emplace(addr, tag);
|
||||
|
||||
for (size_t j = 0; j < alloc_size; ++j) {
|
||||
// check that slot is zeroed
|
||||
assert(p[j] == 0);
|
||||
// check that slot is readable and writable
|
||||
p[j]++;
|
||||
}
|
||||
|
||||
allocations[i] = addr;
|
||||
}
|
||||
|
||||
// free some of allocations to allow their slots to be reused
|
||||
for (int i = sc.slot_cnt - 1; i >= 0; i -= 2) {
|
||||
free((void *) allocations[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// check that all of the tags were used, except for the reserved tag 0
|
||||
assert(seen_tags == (0xffff & ~(1 << 0)));
|
||||
|
||||
printf("size_class\t%i\t" "tdc_left %i\t" "tdc_right %i\t" "tdc_prev_alloc %i\n",
|
||||
sc.size, left_neighbor_tdc_cnt, right_neighbor_tdc_cnt, prev_alloc_tdc_cnt);
|
||||
|
||||
// make sure tag distinctness checks were actually performed
|
||||
int min_tdc_cnt = sc.slot_cnt * iter_cnt / 5;
|
||||
|
||||
assert(prev_alloc_tdc_cnt > min_tdc_cnt);
|
||||
|
||||
if (sc.slot_cnt > 1) {
|
||||
assert(left_neighbor_tdc_cnt > min_tdc_cnt);
|
||||
assert(right_neighbor_tdc_cnt > min_tdc_cnt);
|
||||
}
|
||||
|
||||
// async tag check failures are reported on context switch
|
||||
do_context_switch();
|
||||
}
|
||||
|
||||
printf("\nTag use counters:\n");
|
||||
|
||||
int min = INT_MAX;
|
||||
int max = 0;
|
||||
double geomean = 0.0;
|
||||
for (int i = min_tag; i <= max_tag; ++i) {
|
||||
int v = tag_usage[i];
|
||||
geomean += log(v);
|
||||
min = std::min(min, v);
|
||||
max = std::max(max, v);
|
||||
printf("%i\t%i\n", i, tag_usage[i]);
|
||||
}
|
||||
int tag_cnt = 1 + max_tag - min_tag;
|
||||
geomean = exp(geomean / tag_cnt);
|
||||
|
||||
double max_deviation = std::max((double) max - geomean, geomean - min);
|
||||
|
||||
printf("geomean: %.2f, max deviation from geomean: %.2f%%\n", geomean, (100.0 * max_deviation) / geomean);
|
||||
}
|
||||
|
||||
u8* alloc_default() {
|
||||
const size_t full_alloc_size = DEFAULT_ALLOC_SIZE + CANARY_SIZE;
|
||||
set<uptr> addrs;
|
||||
|
||||
// make sure allocation has both left and right neighbors, otherwise overflow/underflow tests
|
||||
// will fail when allocation is at the end/beginning of slab
|
||||
for (;;) {
|
||||
u8 *p = (u8 *) malloc(DEFAULT_ALLOC_SIZE);
|
||||
assert(p);
|
||||
|
||||
uptr addr = (uptr) untag_pointer(p);
|
||||
uptr left = addr - full_alloc_size;
|
||||
if (addrs.find(left) != addrs.end()) {
|
||||
uptr right = addr + full_alloc_size;
|
||||
if (addrs.find(right) != addrs.end()) {
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
||||
addrs.emplace(addr);
|
||||
}
|
||||
}
|
||||
|
||||
int expected_segv_code;
|
||||
|
||||
#define expect_segv(exp, segv_code) ({\
|
||||
expected_segv_code = segv_code; \
|
||||
volatile auto val = exp; \
|
||||
(void) val; \
|
||||
do_context_switch(); \
|
||||
fprintf(stderr, "didn't receive SEGV code %i", segv_code); \
|
||||
exit(1); })
|
||||
|
||||
// it's expected that the device is configured to use asymm MTE tag checking mode (sync read checks,
|
||||
// async write checks)
|
||||
#define expect_read_segv(exp) expect_segv(exp, SEGV_MTESERR)
|
||||
#define expect_write_segv(exp) expect_segv(exp, SEGV_MTEAERR)
|
||||
|
||||
void read_after_free() {
|
||||
u8 *p = alloc_default();
|
||||
free(p);
|
||||
expect_read_segv(p[0]);
|
||||
}
|
||||
|
||||
void write_after_free() {
|
||||
u8 *p = alloc_default();
|
||||
free(p);
|
||||
expect_write_segv(p[0] = 1);
|
||||
}
|
||||
|
||||
void underflow_read() {
|
||||
u8 *p = alloc_default();
|
||||
expect_read_segv(p[-1]);
|
||||
}
|
||||
|
||||
void underflow_write() {
|
||||
u8 *p = alloc_default();
|
||||
expect_write_segv(p[-1] = 1);
|
||||
}
|
||||
|
||||
void overflow_read() {
|
||||
u8 *p = alloc_default();
|
||||
expect_read_segv(p[DEFAULT_ALLOC_SIZE + CANARY_SIZE]);
|
||||
}
|
||||
|
||||
void overflow_write() {
|
||||
u8 *p = alloc_default();
|
||||
expect_write_segv(p[DEFAULT_ALLOC_SIZE + CANARY_SIZE] = 1);
|
||||
}
|
||||
|
||||
void untagged_read() {
|
||||
u8 *p = alloc_default();
|
||||
p = (u8 *) untag_pointer(p);
|
||||
expect_read_segv(p[0]);
|
||||
}
|
||||
|
||||
void untagged_write() {
|
||||
u8 *p = alloc_default();
|
||||
p = (u8 *) untag_pointer(p);
|
||||
expect_write_segv(p[0] = 1);
|
||||
}
|
||||
|
||||
// checks that each of memory locations inside the buffer is tagged with expected_tag
|
||||
void check_tag(void *buf, size_t len, u8 expected_tag) {
|
||||
for (size_t i = 0; i < len; ++i) {
|
||||
assert(get_pointer_tag(__arm_mte_get_tag((void *) ((uintptr_t) buf + i))) == expected_tag);
|
||||
}
|
||||
}
|
||||
|
||||
void madvise_dontneed() {
|
||||
const size_t len = 100'000;
|
||||
void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
assert(ptr != MAP_FAILED);
|
||||
|
||||
// check that 0 is the initial tag
|
||||
check_tag(ptr, len, 0);
|
||||
|
||||
arm_mte_tag_and_clear_mem(set_pointer_tag(ptr, 1), len);
|
||||
check_tag(ptr, len, 1);
|
||||
|
||||
memset(set_pointer_tag(ptr, 1), 1, len);
|
||||
|
||||
assert(madvise(ptr, len, MADV_DONTNEED) == 0);
|
||||
// check that MADV_DONTNEED resets the tag
|
||||
check_tag(ptr, len, 0);
|
||||
|
||||
// check that MADV_DONTNEED clears the memory
|
||||
for (size_t i = 0; i < len; ++i) {
|
||||
assert(((u8 *) ptr)[i] == 0);
|
||||
}
|
||||
|
||||
// check that mistagged read after MADV_DONTNEED fails
|
||||
expect_read_segv(*((u8 *) set_pointer_tag(ptr, 1)));
|
||||
}
|
||||
|
||||
map<string, function<void()>> tests = {
|
||||
#define TEST(s) { #s, s }
|
||||
TEST(tag_distinctness),
|
||||
TEST(read_after_free),
|
||||
TEST(write_after_free),
|
||||
TEST(overflow_read),
|
||||
TEST(overflow_write),
|
||||
TEST(underflow_read),
|
||||
TEST(underflow_write),
|
||||
TEST(untagged_read),
|
||||
TEST(untagged_write),
|
||||
TEST(madvise_dontneed),
|
||||
#undef TEST
|
||||
};
|
||||
|
||||
void segv_handler(int, siginfo_t *si, void *) {
|
||||
if (expected_segv_code == 0 || expected_segv_code != si->si_code) {
|
||||
fprintf(stderr, "received unexpected SEGV_CODE %i", si->si_code);
|
||||
exit(139); // standard exit code for SIGSEGV
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
setbuf(stdout, NULL);
|
||||
assert(argc == 2);
|
||||
|
||||
auto test_name = string(argv[1]);
|
||||
auto test_fn = tests[test_name];
|
||||
assert(test_fn != nullptr);
|
||||
|
||||
assert(mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_ASYNC) == 1);
|
||||
|
||||
struct sigaction sa = {
|
||||
.sa_sigaction = segv_handler,
|
||||
.sa_flags = SA_SIGINFO,
|
||||
};
|
||||
|
||||
assert(sigaction(SIGSEGV, &sa, nullptr) == 0);
|
||||
|
||||
test_fn();
|
||||
do_context_switch();
|
||||
|
||||
return 0;
|
||||
}
|
androidtest/src/grapheneos/hmalloc/MemtagTest.java — new file (79 lines)
|
@ -0,0 +1,79 @@
|
|||
package grapheneos.hmalloc;
|
||||
|
||||
import com.android.tradefed.device.DeviceNotAvailableException;
|
||||
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
|
||||
import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
@RunWith(DeviceJUnit4ClassRunner.class)
|
||||
public class MemtagTest extends BaseHostJUnit4Test {
|
||||
private static final String TEST_BINARY = "/data/local/tmp/memtag_test";
|
||||
|
||||
private void runTest(String name) throws DeviceNotAvailableException {
|
||||
var args = new ArrayList<String>();
|
||||
args.add(TEST_BINARY);
|
||||
args.add(name);
|
||||
String cmdLine = String.join(" ", args);
|
||||
|
||||
var result = getDevice().executeShellV2Command(cmdLine);
|
||||
|
||||
assertEquals("stderr", "", result.getStderr());
|
||||
assertEquals("process exit code", 0, result.getExitCode().intValue());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void tag_distinctness() throws DeviceNotAvailableException {
|
||||
runTest("tag_distinctness");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void read_after_free() throws DeviceNotAvailableException {
|
||||
runTest("read_after_free");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void write_after_free() throws DeviceNotAvailableException {
|
||||
runTest("write_after_free");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void underflow_read() throws DeviceNotAvailableException {
|
||||
runTest("underflow_read");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void underflow_write() throws DeviceNotAvailableException {
|
||||
runTest("underflow_write");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void overflow_read() throws DeviceNotAvailableException {
|
||||
runTest("overflow_read");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void overflow_write() throws DeviceNotAvailableException {
|
||||
runTest("overflow_write");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void untagged_read() throws DeviceNotAvailableException {
|
||||
runTest("untagged_read");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void untagged_write() throws DeviceNotAvailableException {
|
||||
runTest("untagged_write");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void madvise_dontneed() throws DeviceNotAvailableException {
|
||||
runTest("madvise_dontneed");
|
||||
}
|
||||
}
|
arm_mte.h — new file (91 lines)
|
@ -0,0 +1,91 @@
|
|||
#ifndef ARM_MTE_H
|
||||
#define ARM_MTE_H
|
||||
|
||||
#include <arm_acle.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Returns a tagged pointer.
|
||||
// See https://developer.arm.com/documentation/ddi0602/2023-09/Base-Instructions/IRG--Insert-Random-Tag-
|
||||
static inline void *arm_mte_create_random_tag(void *p, uint64_t exclusion_mask) {
|
||||
return __arm_mte_create_random_tag(p, exclusion_mask);
|
||||
}
|
||||
|
||||
// Tag the memory region with the tag specified in tag bits of tagged_ptr. Memory region itself is
|
||||
// zeroed.
|
||||
// tagged_ptr has to be aligned by 16, and len has to be a multiple of 16 (tag granule size).
|
||||
//
|
||||
// Arm's software optimization guide says:
|
||||
// "it is recommended to use STZGM (or DCZGVA) to set tag if data is not a concern." (STZGM and
|
||||
// DCGZVA are zeroing variants of tagging instructions).
|
||||
//
|
||||
// Contents of this function were copied from scudo:
|
||||
// https://android.googlesource.com/platform/external/scudo/+/refs/tags/android-14.0.0_r1/standalone/memtag.h#167
|
||||
//
|
||||
// scudo is licensed under the Apache License v2.0 with LLVM Exceptions, which is compatible with
|
||||
// hardened_malloc's MIT license
|
||||
static inline void arm_mte_tag_and_clear_mem(void *tagged_ptr, size_t len) {
|
||||
uintptr_t Begin = (uintptr_t) tagged_ptr;
|
||||
uintptr_t End = Begin + len;
|
||||
uintptr_t LineSize, Next, Tmp;
|
||||
__asm__ __volatile__(
|
||||
".arch_extension memtag \n\t"
|
||||
|
||||
// Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
|
||||
// of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
|
||||
// indicates that the DC instructions are unavailable.
|
||||
"DCZID .req %[Tmp] \n\t"
|
||||
"mrs DCZID, dczid_el0 \n\t"
|
||||
"tbnz DCZID, #4, 3f \n\t"
|
||||
"and DCZID, DCZID, #15 \n\t"
|
||||
"mov %[LineSize], #4 \n\t"
|
||||
"lsl %[LineSize], %[LineSize], DCZID \n\t"
|
||||
".unreq DCZID \n\t"
|
||||
|
||||
// Our main loop doesn't handle the case where we don't need to perform any
|
||||
// DC GZVA operations. If the size of our tagged region is less than
|
||||
// twice the cache line size, bail out to the slow path since it's not
|
||||
// guaranteed that we'll be able to do a DC GZVA.
|
||||
"Size .req %[Tmp] \n\t"
|
||||
"sub Size, %[End], %[Cur] \n\t"
|
||||
"cmp Size, %[LineSize], lsl #1 \n\t"
|
||||
"b.lt 3f \n\t"
|
||||
".unreq Size \n\t"
|
||||
|
||||
"LineMask .req %[Tmp] \n\t"
|
||||
"sub LineMask, %[LineSize], #1 \n\t"
|
||||
|
||||
// STZG until the start of the next cache line.
|
||||
"orr %[Next], %[Cur], LineMask \n\t"
|
||||
|
||||
"1:\n\t"
|
||||
"stzg %[Cur], [%[Cur]], #16 \n\t"
|
||||
"cmp %[Cur], %[Next] \n\t"
|
||||
"b.lt 1b \n\t"
|
||||
|
||||
// DC GZVA cache lines until we have no more full cache lines.
|
||||
"bic %[Next], %[End], LineMask \n\t"
|
||||
".unreq LineMask \n\t"
|
||||
|
||||
"2: \n\t"
|
||||
"dc gzva, %[Cur] \n\t"
|
||||
"add %[Cur], %[Cur], %[LineSize] \n\t"
|
||||
"cmp %[Cur], %[Next] \n\t"
|
||||
"b.lt 2b \n\t"
|
||||
|
||||
// STZG until the end of the tagged region. This loop is also used to handle
|
||||
// slow path cases.
|
||||
|
||||
"3: \n\t"
|
||||
"cmp %[Cur], %[End] \n\t"
|
||||
"b.ge 4f \n\t"
|
||||
"stzg %[Cur], [%[Cur]], #16 \n\t"
|
||||
"b 3b \n\t"
|
||||
|
||||
"4: \n\t"
|
||||
|
||||
: [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next), [Tmp] "=&r"(Tmp)
|
||||
: [End] "r"(End)
|
||||
: "memory"
|
||||
);
|
||||
}
|
||||
#endif
|
|
@ -49,7 +49,7 @@ for size, slots, fragmentation in zip(size_classes, size_class_slots, fragmentat
|
|||
used = size * slots
|
||||
real = page_align(used)
|
||||
print("| ", end='')
|
||||
print(size, str(fragmentation) + "%", slots, real, str(100 - used / real * 100) + "%", sep=" | ", end=" |\n")
|
||||
print(size, f"{fragmentation:.4}%", slots, real, str(100 - used / real * 100) + "%", sep=" | ", end=" |\n")
|
||||
|
||||
if len(argv) < 2:
|
||||
exit()
|
chacha.c — 2 lines changed
|
@ -41,7 +41,7 @@ static const unsigned rounds = 8;
|
|||
a = PLUS(a, b); d = ROTATE(XOR(d, a), 8); \
|
||||
c = PLUS(c, d); b = ROTATE(XOR(b, c), 7);
|
||||
|
||||
static const char sigma[16] = "expand 32-byte k";
|
||||
static const char sigma[16] NONSTRING = "expand 32-byte k";
|
||||
|
||||
void chacha_keysetup(chacha_ctx *x, const u8 *k) {
|
||||
x->input[0] = U8TO32_LITTLE(sigma + 0);
|
||||
|
|
config/default.mk — new file (23 lines)
|
@ -0,0 +1,23 @@
|
|||
CONFIG_WERROR := true
|
||||
CONFIG_NATIVE := true
|
||||
CONFIG_CXX_ALLOCATOR := true
|
||||
CONFIG_UBSAN := false
|
||||
CONFIG_SEAL_METADATA := false
|
||||
CONFIG_ZERO_ON_FREE := true
|
||||
CONFIG_WRITE_AFTER_FREE_CHECK := true
|
||||
CONFIG_SLOT_RANDOMIZE := true
|
||||
CONFIG_SLAB_CANARY := true
|
||||
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 1
|
||||
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 1
|
||||
CONFIG_EXTENDED_SIZE_CLASSES := true
|
||||
CONFIG_LARGE_SIZE_CLASSES := true
|
||||
CONFIG_GUARD_SLABS_INTERVAL := 1
|
||||
CONFIG_GUARD_SIZE_DIVISOR := 2
|
||||
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256
|
||||
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
|
||||
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
|
||||
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
|
||||
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
|
||||
CONFIG_N_ARENA := 4
|
||||
CONFIG_STATS := false
|
||||
CONFIG_SELF_INIT := true
|
config/light.mk — new file (23 lines)
|
@ -0,0 +1,23 @@
|
|||
CONFIG_WERROR := true
|
||||
CONFIG_NATIVE := true
|
||||
CONFIG_CXX_ALLOCATOR := true
|
||||
CONFIG_UBSAN := false
|
||||
CONFIG_SEAL_METADATA := false
|
||||
CONFIG_ZERO_ON_FREE := true
|
||||
CONFIG_WRITE_AFTER_FREE_CHECK := false
|
||||
CONFIG_SLOT_RANDOMIZE := false
|
||||
CONFIG_SLAB_CANARY := true
|
||||
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 0
|
||||
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 0
|
||||
CONFIG_EXTENDED_SIZE_CLASSES := true
|
||||
CONFIG_LARGE_SIZE_CLASSES := true
|
||||
CONFIG_GUARD_SLABS_INTERVAL := 8
|
||||
CONFIG_GUARD_SIZE_DIVISOR := 2
|
||||
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256
|
||||
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
|
||||
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
|
||||
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
|
||||
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
|
||||
CONFIG_N_ARENA := 4
|
||||
CONFIG_STATS := false
|
||||
CONFIG_SELF_INIT := true
|
h_malloc.c — 982 lines changed (diff suppressed because it is too large)
|
@ -5,7 +5,9 @@
|
|||
|
||||
#include <malloc.h>
|
||||
|
||||
__BEGIN_DECLS
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef H_MALLOC_PREFIX
|
||||
#define h_malloc malloc
|
||||
|
@ -21,6 +23,7 @@ __BEGIN_DECLS
|
|||
#define h_malloc_trim malloc_trim
|
||||
#define h_malloc_stats malloc_stats
#define h_mallinfo mallinfo
#define h_mallinfo2 mallinfo2
#define h_malloc_info malloc_info

#define h_memalign memalign

@@ -30,7 +33,12 @@ __BEGIN_DECLS
#define h_malloc_get_state malloc_get_state
#define h_malloc_set_state malloc_set_state

#define h_iterate iterate
#define h_mallinfo_narenas mallinfo_narenas
#define h_mallinfo_nbins mallinfo_nbins
#define h_mallinfo_arena_info mallinfo_arena_info
#define h_mallinfo_bin_info mallinfo_bin_info

#define h_malloc_iterate malloc_iterate
#define h_malloc_disable malloc_disable
#define h_malloc_enable malloc_enable

@@ -40,9 +48,10 @@ __BEGIN_DECLS
#endif

// C standard
void *h_malloc(size_t size);
void *h_calloc(size_t nmemb, size_t size);
void *h_realloc(void *ptr, size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1))) void *h_malloc(size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *h_calloc(size_t nmemb, size_t size);
__attribute__((alloc_size(2))) void *h_realloc(void *ptr, size_t size);
__attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1)))
void *h_aligned_alloc(size_t alignment, size_t size);
void h_free(void *ptr);

@@ -68,36 +77,38 @@ int h_malloc_info(int options, FILE *fp);
#endif

// obsolete glibc extensions
__attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1)))
void *h_memalign(size_t alignment, size_t size);
#ifndef __ANDROID__
void *h_valloc(size_t size);
void *h_pvalloc(size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1))) void *h_valloc(size_t size);
__attribute__((malloc)) void *h_pvalloc(size_t size);
#endif
#ifdef __GLIBC__
void h_cfree(void *ptr);
void h_cfree(void *ptr) __THROW;
void *h_malloc_get_state(void);
int h_malloc_set_state(void *state);
#endif

// Android extensions
#ifdef __ANDROID__
size_t __mallinfo_narenas(void);
size_t __mallinfo_nbins(void);
struct mallinfo __mallinfo_arena_info(size_t arena);
struct mallinfo __mallinfo_bin_info(size_t arena, size_t bin);
int h_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr, size_t size, void *arg),
size_t h_mallinfo_narenas(void);
size_t h_mallinfo_nbins(void);
struct mallinfo h_mallinfo_arena_info(size_t arena);
struct mallinfo h_mallinfo_bin_info(size_t arena, size_t bin);
int h_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr, size_t size, void *arg),
    void *arg);
void h_malloc_disable(void);
void h_malloc_enable(void);
void h_malloc_disable_memory_tagging(void);
#endif

// hardened_malloc extensions

// return an upper bound on object size for any pointer based on malloc metadata
size_t h_malloc_object_size(void *ptr);
size_t h_malloc_object_size(const void *ptr);

// similar to malloc_object_size, but avoiding locking so the results are much more limited
size_t h_malloc_object_size_fast(void *ptr);
size_t h_malloc_object_size_fast(const void *ptr);

// The free function with an extra parameter for passing the size requested at
// allocation time.

@@ -111,6 +122,8 @@ size_t h_malloc_object_size_fast(void *ptr);
// passed size matches the allocated size.
void h_free_sized(void *ptr, size_t expected_size);

__END_DECLS
#ifdef __cplusplus
}
#endif

#endif
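The allocation-attribute annotations added above give the compiler enough information to track allocation sizes at call sites. A minimal standalone sketch of the same idea, using a hypothetical xmalloc wrapper rather than hardened_malloc's own declarations:

#include <stdlib.h>
#include <string.h>

// Hypothetical wrapper: alloc_size(1) tells the compiler the returned pointer
// refers to n bytes, and malloc promises it does not alias existing objects.
__attribute__((malloc)) __attribute__((alloc_size(1)))
static void *xmalloc(size_t n) {
    void *p = malloc(n);
    if (!p) {
        abort();
    }
    return p;
}

int main(void) {
    char *buf = xmalloc(8);
    // At -O2, __builtin_object_size(buf, 0) evaluates to 8 here, so a
    // fortified memcpy copying 16 bytes could be flagged before it runs.
    memcpy(buf, "1234567", 8);
    free(buf);
    return 0;
}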
memory.c (101 changed lines)

@@ -1,7 +1,10 @@
#include <errno.h>

#include <sys/mman.h>

#ifdef LABEL_MEMORY
#include <sys/prctl.h>
#endif

#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41

@@ -14,8 +17,8 @@
#include "memory.h"
#include "util.h"

void *memory_map(size_t size) {
    void *p = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
static void *memory_map_prot(size_t size, int prot) {
    void *p = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (unlikely(p == MAP_FAILED)) {
        if (errno != ENOMEM) {
            fatal_error("non-ENOMEM mmap failure");

@@ -25,30 +28,50 @@ void *memory_map(size_t size) {
    return p;
}

int memory_map_fixed(void *ptr, size_t size) {
    void *p = mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
    if (unlikely(p == MAP_FAILED)) {
        if (errno != ENOMEM) {
            fatal_error("non-ENOMEM MAP_FIXED mmap failure");
        }
        return 1;
    }
    return 0;
void *memory_map(size_t size) {
    return memory_map_prot(size, PROT_NONE);
}

int memory_unmap(void *ptr, size_t size) {
    int ret = munmap(ptr, size);
#ifdef HAS_ARM_MTE
// Note that PROT_MTE can't be cleared via mprotect
void *memory_map_mte(size_t size) {
    return memory_map_prot(size, PROT_MTE);
}
#endif

static bool memory_map_fixed_prot(void *ptr, size_t size, int prot) {
    void *p = mmap(ptr, size, prot, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
    bool ret = p == MAP_FAILED;
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM MAP_FIXED mmap failure");
    }
    return ret;
}

bool memory_map_fixed(void *ptr, size_t size) {
    return memory_map_fixed_prot(ptr, size, PROT_NONE);
}

#ifdef HAS_ARM_MTE
// Note that PROT_MTE can't be cleared via mprotect
bool memory_map_fixed_mte(void *ptr, size_t size) {
    return memory_map_fixed_prot(ptr, size, PROT_MTE);
}
#endif

bool memory_unmap(void *ptr, size_t size) {
    bool ret = munmap(ptr, size);
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM munmap failure");
    }
    return ret;
}

static int memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey) {
static bool memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey) {
#ifdef USE_PKEY
    int ret = pkey_mprotect(ptr, size, prot, pkey);
    bool ret = pkey_mprotect(ptr, size, prot, pkey);
#else
    int ret = mprotect(ptr, size, prot);
    bool ret = mprotect(ptr, size, prot);
#endif
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM mprotect failure");

@@ -56,42 +79,50 @@ static int memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey
    return ret;
}

int memory_protect_ro(void *ptr, size_t size) {
bool memory_protect_ro(void *ptr, size_t size) {
    return memory_protect_prot(ptr, size, PROT_READ, -1);
}

int memory_protect_rw(void *ptr, size_t size) {
bool memory_protect_rw(void *ptr, size_t size) {
    return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, -1);
}

int memory_protect_rw_metadata(void *ptr, size_t size) {
bool memory_protect_rw_metadata(void *ptr, size_t size) {
    return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, get_metadata_key());
}

int memory_remap(void *old, size_t old_size, size_t new_size) {
#ifdef HAVE_COMPATIBLE_MREMAP
bool memory_remap(void *old, size_t old_size, size_t new_size) {
    void *ptr = mremap(old, old_size, new_size, 0);
    if (unlikely(ptr == MAP_FAILED)) {
        if (errno != ENOMEM) {
            fatal_error("non-ENOMEM mremap failure");
        }
        return 1;
    bool ret = ptr == MAP_FAILED;
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM mremap failure");
    }
    return 0;
    return ret;
}

int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) {
bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) {
    void *ptr = mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new);
    if (unlikely(ptr == MAP_FAILED)) {
        if (errno != ENOMEM) {
            fatal_error("non-ENOMEM MREMAP_FIXED mremap failure");
        }
        return 1;
    bool ret = ptr == MAP_FAILED;
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM MREMAP_FIXED mremap failure");
    }
    return 0;
    return ret;
}
#endif

bool memory_purge(void *ptr, size_t size) {
    int ret = madvise(ptr, size, MADV_DONTNEED);
    if (unlikely(ret) && errno != ENOMEM) {
        fatal_error("non-ENOMEM MADV_DONTNEED madvise failure");
    }
    return ret;
}

void memory_set_name(UNUSED void *ptr, UNUSED size_t size, UNUSED const char *name) {
bool memory_set_name(UNUSED void *ptr, UNUSED size_t size, UNUSED const char *name) {
#ifdef LABEL_MEMORY
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name);
    return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name);
#else
    return false;
#endif
}
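All of these wrappers treat a true (nonzero) result as failure and abort on anything other than ENOMEM. A minimal sketch of the reserve-then-commit pattern they support, written directly against mmap/mprotect rather than the allocator's internal helpers:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
    // reserve a 16-page region with no access, as memory_map() does
    size_t size = 16 * 4096;
    void *base = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        return 1;
    }
    // commit only the first page for reading and writing
    if (mprotect(base, 4096, PROT_READ|PROT_WRITE)) {
        return 1;
    }
    memset(base, 0xaa, 4096);
    printf("first byte: %x\n", (unsigned)((unsigned char *)base)[0]);
    return munmap(base, size) != 0;
}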
memory.h (30 changed lines)

@@ -1,18 +1,32 @@
#ifndef MEMORY_H
#define MEMORY_H

#include <stdbool.h>
#include <stddef.h>

#ifdef __linux__
#define HAVE_COMPATIBLE_MREMAP
#endif

int get_metadata_key(void);

void *memory_map(size_t size);
int memory_map_fixed(void *ptr, size_t size);
int memory_unmap(void *ptr, size_t size);
int memory_protect_ro(void *ptr, size_t size);
int memory_protect_rw(void *ptr, size_t size);
int memory_protect_rw_metadata(void *ptr, size_t size);
int memory_remap(void *old, size_t old_size, size_t new_size);
int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);
void memory_set_name(void *ptr, size_t size, const char *name);
#ifdef HAS_ARM_MTE
void *memory_map_mte(size_t size);
#endif
bool memory_map_fixed(void *ptr, size_t size);
#ifdef HAS_ARM_MTE
bool memory_map_fixed_mte(void *ptr, size_t size);
#endif
bool memory_unmap(void *ptr, size_t size);
bool memory_protect_ro(void *ptr, size_t size);
bool memory_protect_rw(void *ptr, size_t size);
bool memory_protect_rw_metadata(void *ptr, size_t size);
#ifdef HAVE_COMPATIBLE_MREMAP
bool memory_remap(void *old, size_t old_size, size_t new_size);
bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);
#endif
bool memory_purge(void *ptr, size_t size);
bool memory_set_name(void *ptr, size_t size, const char *name);

#endif
memtag.h (50 lines, new file)

@@ -0,0 +1,50 @@
#ifndef MEMTAG_H
#define MEMTAG_H

#include "util.h"

#ifdef HAS_ARM_MTE
#include "arm_mte.h"
#define MEMTAG 1
// Note that bionic libc always reserves tag 0 via PR_MTE_TAG_MASK prctl
#define RESERVED_TAG 0
#define TAG_WIDTH 4
#endif

static inline void *untag_pointer(void *ptr) {
#ifdef HAS_ARM_MTE
    const uintptr_t mask = UINTPTR_MAX >> 8;
    return (void *) ((uintptr_t) ptr & mask);
#else
    return ptr;
#endif
}

static inline const void *untag_const_pointer(const void *ptr) {
#ifdef HAS_ARM_MTE
    const uintptr_t mask = UINTPTR_MAX >> 8;
    return (const void *) ((uintptr_t) ptr & mask);
#else
    return ptr;
#endif
}

static inline void *set_pointer_tag(void *ptr, u8 tag) {
#ifdef HAS_ARM_MTE
    return (void *) (((uintptr_t) tag << 56) | (uintptr_t) untag_pointer(ptr));
#else
    (void) tag;
    return ptr;
#endif
}

static inline u8 get_pointer_tag(void *ptr) {
#ifdef HAS_ARM_MTE
    return (((uintptr_t) ptr) >> 56) & 0xf;
#else
    (void) ptr;
    return 0;
#endif
}

#endif
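A small sketch of how these helpers compose, assuming a build without HAS_ARM_MTE so tagging degrades to a no-op (on MTE hardware the tag would occupy the top byte of the pointer); the helper copies here exist only to make the snippet self-contained:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

// non-MTE fallbacks, matching the #else branches above
static inline void *untag_pointer(void *ptr) { return ptr; }
static inline void *set_pointer_tag(void *ptr, u8 tag) { (void) tag; return ptr; }
static inline u8 get_pointer_tag(void *ptr) { (void) ptr; return 0; }

int main(void) {
    int x = 42;
    void *tagged = set_pointer_tag(&x, 5);
    // without MTE the tag is dropped, so the reported tag is 0 and the
    // pointer dereferences normally after untagging
    printf("tag %u, value %d\n", get_pointer_tag(tagged), *(int *)untag_pointer(tagged));
    return 0;
}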
new.cc (8 changed lines)

@@ -1,7 +1,9 @@
// needed with libstdc++ but not libc++
#if __has_include(<bits/functexcept.h>)
#include <bits/functexcept.h>
#include <new>
#endif

#define noreturn
#include <new>

#include "h_malloc.h"
#include "util.h"

@@ -78,7 +80,6 @@ EXPORT void operator delete[](void *ptr, size_t size) noexcept {
    h_free_sized(ptr, size);
}

#if __cplusplus >= 201703L
COLD static void *handle_out_of_memory(size_t size, size_t alignment, bool nothrow) {
    void *ptr = nullptr;

@@ -150,4 +151,3 @@ EXPORT void operator delete(void *ptr, size_t size, std::align_val_t) noexcept {
EXPORT void operator delete[](void *ptr, size_t size, std::align_val_t) noexcept {
    h_free_sized(ptr, size);
}
#endif
pages.c (12 changed lines)

@@ -9,10 +9,6 @@ static bool add_guards(size_t size, size_t guard_size, size_t *total_size) {
        __builtin_add_overflow(*total_size, guard_size, total_size);
}

static uintptr_t alignment_ceiling(uintptr_t s, uintptr_t alignment) {
    return ((s) + (alignment - 1)) & ((~alignment) + 1);
}

void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name) {
    size_t real_size;
    if (unlikely(add_guards(usable_size, guard_size, &real_size))) {

@@ -33,7 +29,7 @@ void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, cons
}

void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name) {
    usable_size = PAGE_CEILING(usable_size);
    usable_size = page_align(usable_size);
    if (unlikely(!usable_size)) {
        errno = ENOMEM;
        return NULL;

@@ -59,7 +55,7 @@ void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_

    void *usable = (char *)real + guard_size;

    size_t lead_size = alignment_ceiling((uintptr_t)usable, alignment) - (uintptr_t)usable;
    size_t lead_size = align((uintptr_t)usable, alignment) - (uintptr_t)usable;
    size_t trail_size = alloc_size - lead_size - usable_size;
    void *base = (char *)usable + lead_size;

@@ -86,5 +82,7 @@ void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_
}

void deallocate_pages(void *usable, size_t usable_size, size_t guard_size) {
    memory_unmap((char *)usable - guard_size, usable_size + guard_size * 2);
    if (unlikely(memory_unmap((char *)usable - guard_size, usable_size + guard_size * 2))) {
        memory_purge(usable, usable_size);
    }
}
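The guard-page layout above comes down to overflow-checked size arithmetic around the usable region. A standalone sketch of that check (the add_guards name and shape mirror the context lines above; the rest is illustrative):

#include <stdbool.h>
#include <stdio.h>

// [guard][usable][guard]: add one guard on each side, failing on size_t overflow
static bool add_guards(size_t size, size_t guard_size, size_t *total_size) {
    return __builtin_add_overflow(size, guard_size, total_size) ||
        __builtin_add_overflow(*total_size, guard_size, total_size);
}

int main(void) {
    size_t total;
    if (add_guards(256 * 1024, 4096, &total)) {
        return 1;
    }
    printf("mapping size with guards: %zu\n", total);
    // an impossibly large request overflows and is rejected up front
    printf("overflow rejected: %d\n", add_guards((size_t)-8, 4096, &total));
    return 0;
}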
pages.h (7 changed lines)

@@ -5,16 +5,21 @@
#include <stddef.h>
#include <stdint.h>

#include "util.h"

#define PAGE_SHIFT 12
#ifndef PAGE_SIZE
#define PAGE_SIZE ((size_t)1 << PAGE_SHIFT)
#endif
#define PAGE_CEILING(s) (((s) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name);
void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name);
void deallocate_pages(void *usable, size_t usable_size, size_t guard_size);

static inline size_t page_align(size_t size) {
    return align(size, PAGE_SIZE);
}

static inline size_t hash_page(const void *p) {
    uintptr_t u = (uintptr_t)p >> PAGE_SHIFT;
    size_t sum = u;
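page_align() replaces the old PAGE_CEILING macro with the shared align() helper: round a size up to the next multiple of the 4 KiB page size. A quick standalone check of the rounding (helpers re-declared locally so the snippet compiles on its own):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE ((size_t)1 << PAGE_SHIFT)

// same rounding as align()/page_align() above; alignment must be a power of two
static inline size_t align(size_t size, size_t alignment) {
    size_t mask = alignment - 1;
    return (size + mask) & ~mask;
}

static inline size_t page_align(size_t size) {
    return align(size, PAGE_SIZE);
}

int main(void) {
    printf("%zu %zu %zu\n", page_align(1), page_align(4096), page_align(4097));
    // prints: 4096 4096 8192
    return 0;
}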
random.c (14 changed lines)

@@ -5,17 +5,7 @@
#include "random.h"
#include "util.h"

#if __has_include(<sys/random.h>)
// glibc 2.25 and later
#include <sys/random.h>
#else
#include <unistd.h>
#include <sys/syscall.h>

static ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
    return syscall(SYS_getrandom, buf, buflen, flags);
}
#endif

static void get_random_seed(void *buf, size_t size) {
    while (size) {

@@ -102,7 +92,7 @@ u16 get_random_u16_uniform(struct random_state *state, u16 bound) {
    if (leftover < bound) {
        u16 threshold = -bound % bound;
        while (leftover < threshold) {
            random = get_random_u16(state);
            random = get_random_u16(state);
            multiresult = random * bound;
            leftover = (u16)multiresult;
        }

@@ -129,7 +119,7 @@ u64 get_random_u64_uniform(struct random_state *state, u64 bound) {
    if (leftover < bound) {
        u64 threshold = -bound % bound;
        while (leftover < threshold) {
            random = get_random_u64(state);
            random = get_random_u64(state);
            multiresult = random * bound;
            leftover = multiresult;
        }
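The retry loop shown above is the usual multiply-shift rejection method for drawing a uniform value in [0, bound) without modulo bias: only results below a small threshold are rejected and redrawn. A self-contained sketch with a stand-in generator (rand() here purely for illustration; the allocator draws from its own random_state):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// stand-in for get_random_u16(): any uniform 16-bit source works
static uint16_t random_u16(void) {
    return (uint16_t)rand();
}

// uniform value in [0, bound), bound > 0, via multiply-shift with rejection
static uint16_t random_u16_uniform(uint16_t bound) {
    uint32_t multiresult = (uint32_t)random_u16() * bound;
    uint16_t leftover = (uint16_t)multiresult;
    if (leftover < bound) {
        // threshold == 2^16 mod bound, computed without a 32-bit division
        uint16_t threshold = (uint16_t)(-bound) % bound;
        while (leftover < threshold) {
            multiresult = (uint32_t)random_u16() * bound;
            leftover = (uint16_t)multiresult;
        }
    }
    return (uint16_t)(multiresult >> 16);
}

int main(void) {
    srand(1);
    for (int i = 0; i < 8; i++) {
        printf("%u ", random_u16_uniform(10));
    }
    printf("\n");
    return 0;
}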
test/.gitignore (40 changed lines, vendored)

@@ -1,4 +1,44 @@
large_array_growth
mallinfo
mallinfo2
malloc_info
offset
delete_type_size_mismatch
double_free_large
double_free_large_delayed
double_free_small
double_free_small_delayed
invalid_free_protected
invalid_free_small_region
invalid_free_small_region_far
invalid_free_unprotected
read_after_free_large
read_after_free_small
read_zero_size
string_overflow
unaligned_free_large
unaligned_free_small
uninitialized_free
uninitialized_malloc_usable_size
uninitialized_realloc
write_after_free_large
write_after_free_large_reuse
write_after_free_small
write_after_free_small_reuse
write_zero_size
unaligned_malloc_usable_size_small
invalid_malloc_usable_size_small
invalid_malloc_usable_size_small_quarantine
malloc_object_size
malloc_object_size_offset
invalid_malloc_object_size_small
invalid_malloc_object_size_small_quarantine
impossibly_large_malloc
overflow_large_1_byte
overflow_large_8_byte
overflow_small_1_byte
overflow_small_8_byte
uninitialized_read_large
uninitialized_read_small
realloc_init
__pycache__/
@@ -1,21 +1,76 @@
CONFIG_SLAB_CANARY := true
CONFIG_EXTENDED_SIZE_CLASSES := true

ifneq ($(VARIANT),)
    $(error testing non-default variants not yet supported)
endif

ifeq (,$(filter $(CONFIG_SLAB_CANARY),true false))
    $(error CONFIG_SLAB_CANARY must be true or false)
endif

LDLIBS := -lpthread
dir=$(dir $(realpath $(firstword $(MAKEFILE_LIST))))

CPPFLAGS += \
    -DSLAB_CANARY=$(CONFIG_SLAB_CANARY)
CPPFLAGS := \
    -D_GNU_SOURCE \
    -DSLAB_CANARY=$(CONFIG_SLAB_CANARY) \
    -DCONFIG_EXTENDED_SIZE_CLASSES=$(CONFIG_EXTENDED_SIZE_CLASSES)

SHARED_FLAGS := -O3

CFLAGS := -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes
CXXFLAGS := -std=c++17 -fsized-deallocation $(SHARED_FLAGS)
LDFLAGS := -Wl,-L$(dir)../out,-R,$(dir)../out

LDLIBS := -lpthread -lhardened_malloc

EXECUTABLES := \
    offset \
    mallinfo \
    mallinfo2 \
    malloc_info \
    large_array_growth
    large_array_growth \
    double_free_large \
    double_free_large_delayed \
    double_free_small \
    double_free_small_delayed \
    unaligned_free_large \
    unaligned_free_small \
    read_after_free_large \
    read_after_free_small \
    write_after_free_large \
    write_after_free_large_reuse \
    write_after_free_small \
    write_after_free_small_reuse \
    read_zero_size \
    write_zero_size \
    invalid_free_protected \
    invalid_free_unprotected \
    invalid_free_small_region \
    invalid_free_small_region_far \
    uninitialized_read_small \
    uninitialized_read_large \
    uninitialized_free \
    uninitialized_realloc \
    uninitialized_malloc_usable_size \
    overflow_large_1_byte \
    overflow_large_8_byte \
    overflow_small_1_byte \
    overflow_small_8_byte \
    string_overflow \
    delete_type_size_mismatch \
    unaligned_malloc_usable_size_small \
    invalid_malloc_usable_size_small \
    invalid_malloc_usable_size_small_quarantine \
    malloc_object_size \
    malloc_object_size_offset \
    invalid_malloc_object_size_small \
    invalid_malloc_object_size_small_quarantine \
    impossibly_large_malloc \
    realloc_init

all: $(EXECUTABLES)

clean:
	rm -f $(EXECUTABLES)
	rm -fr ./__pycache__

test/__init__.py (0 lines, new file)
@ -1,11 +1,12 @@
|
|||
#include <stdint.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
struct foo {
|
||||
uint64_t a, b, c, d;
|
||||
};
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
OPTNONE int main(void) {
|
||||
void *p = new char;
|
||||
struct foo *c = (struct foo *)p;
|
||||
delete c;
|
|
@ -1,8 +1,9 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
void *p = malloc(128 * 1024);
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
|
@ -1,12 +1,13 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
void *p = malloc(128 * 1024);
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
void *q = malloc(128 * 1024);
|
||||
void *q = malloc(256 * 1024);
|
||||
if (!q) {
|
||||
return 1;
|
||||
}
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
8
test/impossibly_large_malloc.c
Normal file
8
test/impossibly_large_malloc.c
Normal file
|
@ -0,0 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(-8);
|
||||
return !(p == NULL);
|
||||
}
|
|
@ -2,8 +2,9 @@
|
|||
|
||||
#include <sys/mman.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
free(malloc(16));
|
||||
char *p = mmap(NULL, 4096 * 16, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
|
||||
if (p == MAP_FAILED) {
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
|
@ -2,8 +2,9 @@
|
|||
|
||||
#include <sys/mman.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
free(malloc(16));
|
||||
char *p = mmap(NULL, 4096 * 16, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
|
||||
if (p == MAP_FAILED) {
|
15
test/invalid_malloc_object_size_small.c
Normal file
15
test/invalid_malloc_object_size_small.c
Normal file
|
@ -0,0 +1,15 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
size_t malloc_object_size(void *ptr);
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
char *q = p + 4096 * 4;
|
||||
malloc_object_size(q);
|
||||
return 0;
|
||||
}
|
15
test/invalid_malloc_object_size_small_quarantine.c
Normal file
15
test/invalid_malloc_object_size_small_quarantine.c
Normal file
|
@ -0,0 +1,15 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
size_t malloc_object_size(void *ptr);
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
free(p);
|
||||
malloc_object_size(p);
|
||||
return 0;
|
||||
}
|
13
test/invalid_malloc_usable_size_small.c
Normal file
13
test/invalid_malloc_usable_size_small.c
Normal file
|
@ -0,0 +1,13 @@
|
|||
#include <malloc.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
char *q = p + 4096 * 4;
|
||||
malloc_usable_size(q);
|
||||
return 0;
|
||||
}
|
13
test/invalid_malloc_usable_size_small_quarantine.c
Normal file
13
test/invalid_malloc_usable_size_small_quarantine.c
Normal file
|
@ -0,0 +1,13 @@
|
|||
#include <malloc.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
free(p);
|
||||
malloc_usable_size(p);
|
||||
return 0;
|
||||
}
|
|
@ -1,8 +1,9 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = NULL;
|
||||
size_t size = 256 * 1024;
|
||||
|
||||
|
|
|
@ -1,21 +1,44 @@
|
|||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#if defined(__GLIBC__) || defined(__ANDROID__)
|
||||
#include <malloc.h>
|
||||
#endif
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
malloc(1024 * 1024 * 1024);
|
||||
malloc(16);
|
||||
malloc(32);
|
||||
malloc(64);
|
||||
#include "test_util.h"
|
||||
|
||||
static void print_mallinfo(void) {
|
||||
#if defined(__GLIBC__) || defined(__ANDROID__)
|
||||
struct mallinfo info = mallinfo();
|
||||
printf("arena: %zu\n", info.arena);
|
||||
printf("ordblks: %zu\n", info.ordblks);
|
||||
printf("smblks: %zu\n", info.smblks);
|
||||
printf("hblks: %zu\n", info.hblks);
|
||||
printf("hblkhd: %zu\n", info.hblkhd);
|
||||
printf("usmblks: %zu\n", info.usmblks);
|
||||
printf("fsmblks: %zu\n", info.fsmblks);
|
||||
printf("uordblks: %zu\n", info.uordblks);
|
||||
printf("fordblks: %zu\n", info.fordblks);
|
||||
printf("keepcost: %zu\n", info.keepcost);
|
||||
printf("mallinfo:\n");
|
||||
printf("arena: %zu\n", (size_t)info.arena);
|
||||
printf("ordblks: %zu\n", (size_t)info.ordblks);
|
||||
printf("smblks: %zu\n", (size_t)info.smblks);
|
||||
printf("hblks: %zu\n", (size_t)info.hblks);
|
||||
printf("hblkhd: %zu\n", (size_t)info.hblkhd);
|
||||
printf("usmblks: %zu\n", (size_t)info.usmblks);
|
||||
printf("fsmblks: %zu\n", (size_t)info.fsmblks);
|
||||
printf("uordblks: %zu\n", (size_t)info.uordblks);
|
||||
printf("fordblks: %zu\n", (size_t)info.fordblks);
|
||||
printf("keepcost: %zu\n", (size_t)info.keepcost);
|
||||
#endif
|
||||
}
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *a[4];
|
||||
|
||||
a[0] = malloc(1024 * 1024 * 1024);
|
||||
a[1] = malloc(16);
|
||||
a[2] = malloc(32);
|
||||
a[3] = malloc(64);
|
||||
|
||||
print_mallinfo();
|
||||
|
||||
free(a[0]);
|
||||
free(a[1]);
|
||||
free(a[2]);
|
||||
free(a[3]);
|
||||
|
||||
printf("\n");
|
||||
print_mallinfo();
|
||||
}
|
||||
|
|
44
test/mallinfo2.c
Normal file
44
test/mallinfo2.c
Normal file
|
@ -0,0 +1,44 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#if defined(__GLIBC__)
|
||||
#include <malloc.h>
|
||||
#endif
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
static void print_mallinfo2(void) {
|
||||
#if defined(__GLIBC__)
|
||||
struct mallinfo2 info = mallinfo2();
|
||||
printf("mallinfo2:\n");
|
||||
printf("arena: %zu\n", (size_t)info.arena);
|
||||
printf("ordblks: %zu\n", (size_t)info.ordblks);
|
||||
printf("smblks: %zu\n", (size_t)info.smblks);
|
||||
printf("hblks: %zu\n", (size_t)info.hblks);
|
||||
printf("hblkhd: %zu\n", (size_t)info.hblkhd);
|
||||
printf("usmblks: %zu\n", (size_t)info.usmblks);
|
||||
printf("fsmblks: %zu\n", (size_t)info.fsmblks);
|
||||
printf("uordblks: %zu\n", (size_t)info.uordblks);
|
||||
printf("fordblks: %zu\n", (size_t)info.fordblks);
|
||||
printf("keepcost: %zu\n", (size_t)info.keepcost);
|
||||
#endif
|
||||
}
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *a[4];
|
||||
|
||||
a[0] = malloc(1024 * 1024 * 1024);
|
||||
a[1] = malloc(16);
|
||||
a[2] = malloc(32);
|
||||
a[3] = malloc(64);
|
||||
|
||||
print_mallinfo2();
|
||||
|
||||
free(a[0]);
|
||||
free(a[1]);
|
||||
free(a[2]);
|
||||
free(a[3]);
|
||||
|
||||
printf("\n");
|
||||
print_mallinfo2();
|
||||
}
|
|
@ -1,16 +1,22 @@
|
|||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#if defined(__GLIBC__) || defined(__ANDROID__)
|
||||
#include <malloc.h>
|
||||
#endif
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
void leak_memory(void) {
|
||||
(void)malloc(1024 * 1024 * 1024);
|
||||
(void)malloc(16);
|
||||
(void)malloc(32);
|
||||
(void)malloc(4096);
|
||||
#include "test_util.h"
|
||||
#include "../util.h"
|
||||
|
||||
OPTNONE static void leak_memory(void) {
|
||||
(void)!malloc(1024 * 1024 * 1024);
|
||||
(void)!malloc(16);
|
||||
(void)!malloc(32);
|
||||
(void)!malloc(4096);
|
||||
}
|
||||
|
||||
void *do_work(void *p) {
|
||||
static void *do_work(UNUSED void *p) {
|
||||
leak_memory();
|
||||
return NULL;
|
||||
}
|
||||
|
@ -24,5 +30,7 @@ int main(void) {
|
|||
pthread_join(thread[i], NULL);
|
||||
}
|
||||
|
||||
#if defined(__GLIBC__) || defined(__ANDROID__)
|
||||
malloc_info(0, stdout);
|
||||
#endif
|
||||
}
|
||||
|
|
test/malloc_object_size.c (12 lines, new file)

@@ -0,0 +1,12 @@
#include <stdbool.h>
#include <stdlib.h>

#include "test_util.h"

size_t malloc_object_size(void *ptr);

OPTNONE int main(void) {
    char *p = malloc(16);
    size_t size = malloc_object_size(p);
    return size != (SLAB_CANARY ? 24 : 32);
}

test/malloc_object_size_offset.c (12 lines, new file)

@@ -0,0 +1,12 @@
#include <stdbool.h>
#include <stdlib.h>

#include "test_util.h"

size_t malloc_object_size(void *ptr);

OPTNONE int main(void) {
    char *p = malloc(16);
    size_t size = malloc_object_size(p + 5);
    return size != (SLAB_CANARY ? 19 : 27);
}
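The expected values encode the slot arithmetic these tests rely on: per the checks above, the 16-byte allocation sits in a 32-byte slot, the slab canary consumes 8 of those bytes when SLAB_CANARY is enabled, and an interior pointer loses its offset from the start of the slot. A quick check of that arithmetic, independent of the allocator:

#include <stdio.h>

int main(void) {
    size_t slot = 32;     // slot size implied by the expected values above
    size_t canary = 8;    // slab canary bytes when SLAB_CANARY is enabled
    size_t offset = 5;    // interior-pointer offset used by the second test

    printf("%zu %zu\n", slot - canary, slot);                   // 24 32
    printf("%zu %zu\n", slot - canary - offset, slot - offset); // 19 27
    return 0;
}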
|
@ -3,7 +3,7 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
static unsigned size_classes[] = {
|
||||
static size_t size_classes[] = {
|
||||
/* large */ 4 * 1024 * 1024,
|
||||
/* 0 */ 0,
|
||||
/* 16 */ 16, 32, 48, 64, 80, 96, 112, 128,
|
||||
|
@ -13,7 +13,12 @@ static unsigned size_classes[] = {
|
|||
/* 256 */ 1280, 1536, 1792, 2048,
|
||||
/* 512 */ 2560, 3072, 3584, 4096,
|
||||
/* 1024 */ 5120, 6144, 7168, 8192,
|
||||
/* 2048 */ 10240, 12288, 14336, 16384
|
||||
/* 2048 */ 10240, 12288, 14336, 16384,
|
||||
#if CONFIG_EXTENDED_SIZE_CLASSES
|
||||
/* 4096 */ 20480, 24576, 28672, 32768,
|
||||
/* 8192 */ 40960, 49152, 57344, 65536,
|
||||
/* 16384 */ 81920, 98304, 114688, 131072,
|
||||
#endif
|
||||
};
|
||||
|
||||
#define N_SIZE_CLASSES (sizeof(size_classes) / sizeof(size_classes[0]))
|
||||
|
@ -27,9 +32,9 @@ int main(void) {
|
|||
|
||||
void *p[N_SIZE_CLASSES];
|
||||
for (unsigned i = 0; i < N_SIZE_CLASSES; i++) {
|
||||
unsigned size = size_classes[i];
|
||||
size_t size = size_classes[i];
|
||||
p[i] = malloc(size);
|
||||
if (!p) {
|
||||
if (!p[i]) {
|
||||
return 1;
|
||||
}
|
||||
void *q = malloc(size);
|
||||
|
|
15
test/overflow_large_1_byte.c
Normal file
15
test/overflow_large_1_byte.c
Normal file
|
@ -0,0 +1,15 @@
|
|||
#include <malloc.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
size_t size = malloc_usable_size(p);
|
||||
*(p + size) = 0;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
15
test/overflow_large_8_byte.c
Normal file
15
test/overflow_large_8_byte.c
Normal file
|
@ -0,0 +1,15 @@
|
|||
#include <malloc.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
size_t size = malloc_usable_size(p);
|
||||
*(p + size + 7) = 0;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
15
test/overflow_small_1_byte.c
Normal file
15
test/overflow_small_1_byte.c
Normal file
|
@ -0,0 +1,15 @@
|
|||
#include <malloc.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(8);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
size_t size = malloc_usable_size(p);
|
||||
*(p + size) = 1;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
16
test/overflow_small_8_byte.c
Normal file
16
test/overflow_small_8_byte.c
Normal file
|
@ -0,0 +1,16 @@
|
|||
#include <malloc.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(8);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
size_t size = malloc_usable_size(p);
|
||||
// XOR is used to avoid the test having a 1/256 chance to fail
|
||||
*(p + size + 7) ^= 1;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
21
test/read_after_free_large.c
Normal file
21
test/read_after_free_large.c
Normal file
|
@ -0,0 +1,21 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
memset(p, 'a', 16);
|
||||
free(p);
|
||||
for (size_t i = 0; i < 256 * 1024; i++) {
|
||||
printf("%x\n", p[i]);
|
||||
if (p[i] != '\0') {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -2,8 +2,9 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
|
@ -12,6 +13,9 @@ int main(void) {
|
|||
free(p);
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
printf("%x\n", p[i]);
|
||||
if (p[i] != '\0') {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -1,8 +1,9 @@
|
|||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(0);
|
||||
if (!p) {
|
||||
return 1;
|
33
test/realloc_init.c
Normal file
33
test/realloc_init.c
Normal file
|
@ -0,0 +1,33 @@
|
|||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
static void *thread_func(void *arg) {
|
||||
arg = realloc(arg, 1024);
|
||||
if (!arg) {
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
free(arg);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main(void) {
|
||||
void *mem = realloc(NULL, 12);
|
||||
if (!mem) {
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
pthread_t thread;
|
||||
int r = pthread_create(&thread, NULL, thread_func, mem);
|
||||
if (r != 0) {
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
r = pthread_join(thread, NULL);
|
||||
if (r != 0) {
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
25
test/simple-memory-corruption/.gitignore
vendored
25
test/simple-memory-corruption/.gitignore
vendored
|
@ -1,25 +0,0 @@
|
|||
delete_type_size_mismatch
|
||||
double_free_large
|
||||
double_free_large_delayed
|
||||
double_free_small
|
||||
double_free_small_delayed
|
||||
eight_byte_overflow_large
|
||||
eight_byte_overflow_small
|
||||
invalid_free_protected
|
||||
invalid_free_small_region
|
||||
invalid_free_small_region_far
|
||||
invalid_free_unprotected
|
||||
read_after_free_large
|
||||
read_after_free_small
|
||||
read_zero_size
|
||||
string_overflow
|
||||
unaligned_free_large
|
||||
unaligned_free_small
|
||||
uninitialized_free
|
||||
uninitialized_malloc_usable_size
|
||||
uninitialized_realloc
|
||||
write_after_free_large
|
||||
write_after_free_large_reuse
|
||||
write_after_free_small
|
||||
write_after_free_small_reuse
|
||||
write_zero_size
|
|
@ -1,31 +0,0 @@
|
|||
EXECUTABLES := \
|
||||
double_free_large \
|
||||
double_free_large_delayed \
|
||||
double_free_small \
|
||||
double_free_small_delayed \
|
||||
unaligned_free_large \
|
||||
unaligned_free_small \
|
||||
read_after_free_large \
|
||||
read_after_free_small \
|
||||
write_after_free_large \
|
||||
write_after_free_large_reuse \
|
||||
write_after_free_small \
|
||||
write_after_free_small_reuse \
|
||||
read_zero_size \
|
||||
write_zero_size \
|
||||
invalid_free_protected \
|
||||
invalid_free_unprotected \
|
||||
invalid_free_small_region \
|
||||
invalid_free_small_region_far \
|
||||
uninitialized_free \
|
||||
uninitialized_realloc \
|
||||
uninitialized_malloc_usable_size \
|
||||
eight_byte_overflow_small \
|
||||
eight_byte_overflow_large \
|
||||
string_overflow \
|
||||
delete_type_size_mismatch
|
||||
|
||||
all: $(EXECUTABLES)
|
||||
|
||||
clean:
|
||||
rm -f $(EXECUTABLES)
|
|
@ -1,12 +0,0 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(128 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
*(p + 128 * 1024 + 7) = 0;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(8);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
*(p + 8 + 7) = 0;
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(128 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
memset(p, 'a', 16);
|
||||
free(p);
|
||||
for (size_t i = 0; i < 128 * 1024; i++) {
|
||||
printf("%x\n", p[i]);
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(128 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
free(p);
|
||||
char *q = malloc(128 * 1024);
|
||||
p[64 * 1024 + 1] = 'a';
|
||||
return 0;
|
||||
}
|
|
@ -4,8 +4,9 @@
|
|||
|
||||
#include <malloc.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
242
test/test_smc.py
Normal file
242
test/test_smc.py
Normal file
|
@ -0,0 +1,242 @@
|
|||
import os
|
||||
import subprocess
|
||||
import unittest
|
||||
|
||||
|
||||
class TestSimpleMemoryCorruption(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(self):
|
||||
self.dir = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
def run_test(self, test_name):
|
||||
sub = subprocess.Popen(self.dir + "/" + test_name,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
stdout, stderr = sub.communicate()
|
||||
return stdout, stderr, sub.returncode
|
||||
|
||||
def test_delete_type_size_mismatch(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"delete_type_size_mismatch")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: sized deallocation mismatch (small)\n")
|
||||
|
||||
def test_double_free_large_delayed(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"double_free_large_delayed")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_double_free_large(self):
|
||||
_stdout, stderr, returncode = self.run_test("double_free_large")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_double_free_small_delayed(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"double_free_small_delayed")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: double free (quarantine)\n")
|
||||
|
||||
def test_double_free_small(self):
|
||||
_stdout, stderr, returncode = self.run_test("double_free_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: double free (quarantine)\n")
|
||||
|
||||
def test_overflow_large_1_byte(self):
|
||||
_stdout, _stderr, returncode = self.run_test(
|
||||
"overflow_large_1_byte")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_overflow_large_8_byte(self):
|
||||
_stdout, _stderr, returncode = self.run_test(
|
||||
"overflow_large_8_byte")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_overflow_small_1_byte(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"overflow_small_1_byte")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: canary corrupted\n")
|
||||
|
||||
def test_overflow_small_8_byte(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"overflow_small_8_byte")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: canary corrupted\n")
|
||||
|
||||
def test_invalid_free_protected(self):
|
||||
_stdout, stderr, returncode = self.run_test("invalid_free_protected")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_invalid_free_small_region_far(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_free_small_region_far")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: invalid free within a slab yet to be used\n")
|
||||
|
||||
def test_invalid_free_small_region(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_free_small_region")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: double free\n")
|
||||
|
||||
def test_invalid_free_unprotected(self):
|
||||
_stdout, stderr, returncode = self.run_test("invalid_free_unprotected")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_invalid_malloc_usable_size_small_quarantene(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_malloc_usable_size_small_quarantine")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: invalid malloc_usable_size (quarantine)\n")
|
||||
|
||||
def test_invalid_malloc_usable_size_small(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_malloc_usable_size_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: invalid malloc_usable_size\n")
|
||||
|
||||
def test_read_after_free_large(self):
|
||||
_stdout, _stderr, returncode = self.run_test("read_after_free_large")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_read_after_free_small(self):
|
||||
stdout, _stderr, returncode = self.run_test("read_after_free_small")
|
||||
self.assertEqual(returncode, 0)
|
||||
self.assertEqual(stdout.decode("utf-8"),
|
||||
"0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n")
|
||||
|
||||
def test_read_zero_size(self):
|
||||
_stdout, _stderr, returncode = self.run_test("read_zero_size")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_string_overflow(self):
|
||||
stdout, _stderr, returncode = self.run_test("string_overflow")
|
||||
self.assertEqual(returncode, 0)
|
||||
self.assertEqual(stdout.decode("utf-8"), "overflow by 0 bytes\n")
|
||||
|
||||
def test_unaligned_free_large(self):
|
||||
_stdout, stderr, returncode = self.run_test("unaligned_free_large")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_unaligned_free_small(self):
|
||||
_stdout, stderr, returncode = self.run_test("unaligned_free_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid unaligned free\n")
|
||||
|
||||
def test_unaligned_malloc_usable_size_small(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"unaligned_malloc_usable_size_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid unaligned malloc_usable_size\n")
|
||||
|
||||
def test_uninitialized_free(self):
|
||||
_stdout, stderr, returncode = self.run_test("uninitialized_free")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid free\n")
|
||||
|
||||
def test_uninitialized_malloc_usable_size(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"uninitialized_malloc_usable_size")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid malloc_usable_size\n")
|
||||
|
||||
def test_uninitialized_realloc(self):
|
||||
_stdout, stderr, returncode = self.run_test("uninitialized_realloc")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: invalid realloc\n")
|
||||
|
||||
def test_write_after_free_large_reuse(self):
|
||||
_stdout, _stderr, returncode = self.run_test(
|
||||
"write_after_free_large_reuse")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_write_after_free_large(self):
|
||||
_stdout, _stderr, returncode = self.run_test("write_after_free_large")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_write_after_free_small_reuse(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"write_after_free_small_reuse")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: detected write after free\n")
|
||||
|
||||
def test_write_after_free_small(self):
|
||||
_stdout, stderr, returncode = self.run_test("write_after_free_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode("utf-8"),
|
||||
"fatal allocator error: detected write after free\n")
|
||||
|
||||
def test_write_zero_size(self):
|
||||
_stdout, _stderr, returncode = self.run_test("write_zero_size")
|
||||
self.assertEqual(returncode, -11)
|
||||
|
||||
def test_malloc_object_size(self):
|
||||
_stdout, _stderr, returncode = self.run_test("malloc_object_size")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
def test_malloc_object_size_offset(self):
|
||||
_stdout, _stderr, returncode = self.run_test(
|
||||
"malloc_object_size_offset")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
def test_invalid_malloc_object_size_small(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_malloc_object_size_small")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: invalid malloc_object_size\n")
|
||||
|
||||
def test_invalid_malloc_object_size_small_quarantine(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"invalid_malloc_object_size_small_quarantine")
|
||||
self.assertEqual(returncode, -6)
|
||||
self.assertEqual(stderr.decode(
|
||||
"utf-8"), "fatal allocator error: invalid malloc_object_size (quarantine)\n")
|
||||
|
||||
def test_impossibly_large_malloc(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"impossibly_large_malloc")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
def test_uninitialized_read_small(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"uninitialized_read_small")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
def test_uninitialized_read_large(self):
|
||||
_stdout, stderr, returncode = self.run_test(
|
||||
"uninitialized_read_large")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
def test_realloc_init(self):
|
||||
_stdout, _stderr, returncode = self.run_test(
|
||||
"realloc_init")
|
||||
self.assertEqual(returncode, 0)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
test/test_util.h (10 lines, new file)

@@ -0,0 +1,10 @@
#ifndef TEST_UTIL_H
#define TEST_UTIL_H

#ifdef __clang__
#define OPTNONE __attribute__((optnone))
#else
#define OPTNONE __attribute__((optimize(0)))
#endif

#endif
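OPTNONE exists so the compiler cannot optimize away the deliberate misuse these tests perform (clang spells the attribute optnone, GCC optimize(0)). A minimal test in the same style as the ones above:

#include <stdlib.h>

#ifdef __clang__
#define OPTNONE __attribute__((optnone))
#else
#define OPTNONE __attribute__((optimize(0)))
#endif

// Without OPTNONE an optimizing compiler may delete the dead store below,
// and the write-after-free the test is trying to provoke never happens.
OPTNONE int main(void) {
    char *p = malloc(16);
    if (!p) {
        return 1;
    }
    free(p);
    p[0] = 'a';    // intentional write-after-free, as in write_after_free_small
    return 0;
}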
@ -1,8 +1,9 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(128 * 1024);
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
12
test/unaligned_malloc_usable_size_small.c
Normal file
12
test/unaligned_malloc_usable_size_small.c
Normal file
|
@ -0,0 +1,12 @@
|
|||
#include <malloc.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(16);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
malloc_usable_size(p + 1);
|
||||
return 0;
|
||||
}
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
free((void *)1);
|
||||
return 0;
|
||||
}
|
|
@ -1,7 +1,8 @@
|
|||
#include <malloc.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
malloc_usable_size((void *)1);
|
||||
return 0;
|
||||
}
|
14
test/uninitialized_read_large.c
Normal file
14
test/uninitialized_read_large.c
Normal file
|
@ -0,0 +1,14 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
for (unsigned i = 0; i < 256 * 1024; i++) {
|
||||
if (p[i] != 0) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
14
test/uninitialized_read_small.c
Normal file
14
test/uninitialized_read_small.c
Normal file
|
@ -0,0 +1,14 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(8);
|
||||
for (unsigned i = 0; i < 8; i++) {
|
||||
if (p[i] != 0) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
free(p);
|
||||
return 0;
|
||||
}
|
|
@ -1,7 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
void *p = realloc((void *)1, 16);
|
||||
if (!p) {
|
||||
return 1;
|
|
@ -1,9 +1,9 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
char *p = malloc(128 * 1024);
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
16
test/write_after_free_large_reuse.c
Normal file
16
test/write_after_free_large_reuse.c
Normal file
|
@ -0,0 +1,16 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "test_util.h"
|
||||
#include "../util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(256 * 1024);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
free(p);
|
||||
UNUSED char *q = malloc(256 * 1024);
|
||||
p[64 * 1024 + 1] = 'a';
|
||||
return 0;
|
||||
}
|
|
@ -1,8 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(128);
|
||||
if (!p) {
|
||||
return 1;
|
|
@ -1,14 +1,15 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
#include "../util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(128);
|
||||
if (!p) {
|
||||
return 1;
|
||||
}
|
||||
free(p);
|
||||
char *q = malloc(128);
|
||||
UNUSED char *q = malloc(128);
|
||||
|
||||
p[65] = 'a';
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
__attribute__((optimize(0)))
|
||||
int main(void) {
|
||||
#include "test_util.h"
|
||||
|
||||
OPTNONE int main(void) {
|
||||
char *p = malloc(0);
|
||||
if (!p) {
|
||||
return 1;
|
third_party/libdivide.h (3675 changed lines, vendored)
File diff suppressed because it is too large.
util.c (13 changed lines)

@@ -4,8 +4,15 @@

#include <unistd.h>

#ifdef __ANDROID__
#include <async_safe/log.h>
int mallopt(int param, int value);
#define M_BIONIC_RESTORE_DEFAULT_SIGABRT_HANDLER (-1003)
#endif

#include "util.h"

#ifndef __ANDROID__
static int write_full(int fd, const char *buf, size_t length) {
    do {
        ssize_t bytes_written = write(fd, buf, length);

@@ -21,11 +28,17 @@ static int write_full(int fd, const char *buf, size_t length) {

    return 0;
}
#endif

COLD noreturn void fatal_error(const char *s) {
#ifdef __ANDROID__
    mallopt(M_BIONIC_RESTORE_DEFAULT_SIGABRT_HANDLER, 0);
    async_safe_fatal("hardened_malloc: fatal allocator error: %s", s);
#else
    const char *prefix = "fatal allocator error: ";
    (void)(write_full(STDERR_FILENO, prefix, strlen(prefix)) != -1 &&
        write_full(STDERR_FILENO, s, strlen(s)) != -1 &&
        write_full(STDERR_FILENO, "\n", 1));
    abort();
#endif
}
util.h (75 changed lines)

@@ -1,11 +1,17 @@
#ifndef UTIL_H
#define UTIL_H

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdnoreturn.h>

// C11 noreturn doesn't work in C++
#define noreturn __attribute__((noreturn))

#define likely(x) __builtin_expect(!!(x), 1)
#define likely51(x) __builtin_expect_with_probability(!!(x), 1, 0.51)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define unlikely51(x) __builtin_expect_with_probability(!!(x), 0, 0.51)

#define min(x, y) ({ \
    __typeof__(x) _x = (x); \

@@ -26,11 +32,12 @@
#define STRINGIFY(s) #s
#define ALIAS(f) __attribute__((alias(STRINGIFY(f))))

static inline int ffzl(long x) {
    return __builtin_ffsl(~x);
}

COLD noreturn void fatal_error(const char *s);
// supported since GCC 15
#if __has_attribute (nonstring)
# define NONSTRING __attribute__ ((nonstring))
#else
# define NONSTRING
#endif

typedef uint8_t u8;
typedef uint16_t u16;

@@ -38,28 +45,50 @@ typedef uint32_t u32;
typedef uint64_t u64;
typedef unsigned __int128 u128;

// use __register_atfork directly to avoid linking with libpthread for glibc < 2.28
#ifdef __GLIBC__
#if !__GLIBC_PREREQ(2, 28)
extern void *__dso_handle;
extern int __register_atfork(void (*)(void), void (*)(void), void (*)(void), void *);
#define atfork(prepare, parent, child) __register_atfork(prepare, parent, child, __dso_handle)
#endif
#endif
#define U64_WIDTH 64

#ifndef atfork
#define atfork pthread_atfork
#endif
static inline int ffz64(u64 x) {
    return __builtin_ffsll(~x);
}

#ifdef CONFIG_SEAL_METADATA
// parameter must not be 0
static inline int clz64(u64 x) {
    return __builtin_clzll(x);
}

// parameter must not be 0
static inline u64 log2u64(u64 x) {
    return U64_WIDTH - clz64(x) - 1;
}

static inline size_t align(size_t size, size_t align) {
    size_t mask = align - 1;
    return (size + mask) & ~mask;
}

// u4_arr_{set,get} are helper functions for using u8 array as an array of unsigned 4-bit values.

// val is treated as a 4-bit value
static inline void u4_arr_set(u8 *arr, size_t idx, u8 val) {
    size_t off = idx >> 1;
    size_t shift = (idx & 1) << 2;
    u8 mask = (u8) (0xf0 >> shift);
    arr[off] = (arr[off] & mask) | (val << shift);
}

static inline u8 u4_arr_get(const u8 *arr, size_t idx) {
    size_t off = idx >> 1;
    size_t shift = (idx & 1) << 2;
    return (u8) ((arr[off] >> shift) & 0xf);
}

COLD noreturn void fatal_error(const char *s);

#if CONFIG_SEAL_METADATA

#ifdef __GLIBC__
#if __GLIBC_PREREQ(2, 27)
#define USE_PKEY
#endif
#endif

#ifndef USE_PKEY
#else
#error "CONFIG_SEAL_METADATA requires Memory Protection Key support"
#endif
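The 4-bit array helpers pack two values per byte, with even indices in the low nibble and odd indices in the high nibble. A quick round trip (helpers copied so the snippet stands alone):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

static inline void u4_arr_set(u8 *arr, size_t idx, u8 val) {
    size_t off = idx >> 1;
    size_t shift = (idx & 1) << 2;
    u8 mask = (u8) (0xf0 >> shift);
    arr[off] = (arr[off] & mask) | (val << shift);
}

static inline u8 u4_arr_get(const u8 *arr, size_t idx) {
    size_t off = idx >> 1;
    size_t shift = (idx & 1) << 2;
    return (u8) ((arr[off] >> shift) & 0xf);
}

int main(void) {
    u8 arr[2] = {0};    // four 4-bit slots in two bytes
    u4_arr_set(arr, 0, 0x3);
    u4_arr_set(arr, 1, 0xf);
    u4_arr_set(arr, 3, 0x9);
    printf("%x %x %x %x\n", u4_arr_get(arr, 0), u4_arr_get(arr, 1),
           u4_arr_get(arr, 2), u4_arr_get(arr, 3));    // prints: 3 f 0 9
    return 0;
}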