Compare commits

...

4 commits

Author SHA1 Message Date
SkewedZeppelin
132224c742
Merge 15de34ed77 into 261b7bbf09 2025-12-06 08:12:48 -05:00
Ganwtrs
261b7bbf09 Correct title of README from Hardened malloc to hardened_malloc 2025-12-06 00:40:28 -05:00
Ganwtrs
74ef8a96ed Remove spaces around the slash (like one/two) 2025-12-05 21:55:56 -05:00
Tavi
15de34ed77
perform size checks on various operations
Signed-off-by: Tavi <tavi@divested.dev>
Co-authored-by: Christian Göttsche <cgzones@googlemail.com>
2025-11-13 15:28:30 -05:00
46 changed files with 1178 additions and 25 deletions

View file

@ -28,6 +28,7 @@ common_cflags = [
"-DN_ARENA=1",
"-DCONFIG_STATS=true",
"-DCONFIG_SELF_INIT=false",
"-DCONFIG_BLOCK_OPS_CHECK_SIZE=false",
]
cc_defaults {

24
CREDITS
View file

@ -23,6 +23,30 @@ h_malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
memcpy.c, memccpy.c, memmove.c, memset.c, swab.c, wmemset.c:
Copyright © 2005-2020 Rich Felker, et al.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Contributor list: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
libdivide:
Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>

View file

@ -40,6 +40,10 @@ CXXFLAGS := $(CXXFLAGS) -std=c++17 -fsized-deallocation $(SHARED_FLAGS)
LDFLAGS := $(LDFLAGS) -Wl,-O1,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text
SOURCES := chacha.c h_malloc.c memory.c pages.c random.c util.c
ifeq ($(CONFIG_BLOCK_OPS_CHECK_SIZE),true)
SOURCES += memcpy.c memccpy.c memmove.c memset.c swab.c wmemset.c
BOSC_EXTRAS := musl.h
endif
OBJECTS := $(SOURCES:.c=.o)
ifeq ($(CONFIG_CXX_ALLOCATOR),true)
@ -89,6 +93,10 @@ ifeq (,$(filter $(CONFIG_SELF_INIT),true false))
$(error CONFIG_SELF_INIT must be true or false)
endif
ifeq (,$(filter $(CONFIG_BLOCK_OPS_CHECK_SIZE),true false))
$(error CONFIG_BLOCK_OPS_CHECK_SIZE must be true or false)
endif
CPPFLAGS += \
-DCONFIG_SEAL_METADATA=$(CONFIG_SEAL_METADATA) \
-DZERO_ON_FREE=$(CONFIG_ZERO_ON_FREE) \
@ -108,7 +116,8 @@ CPPFLAGS += \
-DCONFIG_CLASS_REGION_SIZE=$(CONFIG_CLASS_REGION_SIZE) \
-DN_ARENA=$(CONFIG_N_ARENA) \
-DCONFIG_STATS=$(CONFIG_STATS) \
-DCONFIG_SELF_INIT=$(CONFIG_SELF_INIT)
-DCONFIG_SELF_INIT=$(CONFIG_SELF_INIT) \
-DCONFIG_BLOCK_OPS_CHECK_SIZE=$(CONFIG_BLOCK_OPS_CHECK_SIZE)
$(OUT)/libhardened_malloc$(SUFFIX).so: $(OBJECTS) | $(OUT)
$(CC) $(CFLAGS) $(LDFLAGS) -shared $^ $(LDLIBS) -o $@
@ -118,7 +127,7 @@ $(OUT):
$(OUT)/chacha.o: chacha.c chacha.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/h_malloc.o: h_malloc.c include/h_malloc.h mutex.h memory.h pages.h random.h util.h $(CONFIG_FILE) | $(OUT)
$(OUT)/h_malloc.o: h_malloc.c include/h_malloc.h mutex.h memory.h $(BOSC_EXTRAS) pages.h random.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/memory.o: memory.c memory.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
@ -126,11 +135,24 @@ $(OUT)/new.o: new.cc include/h_malloc.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.cc) $(OUTPUT_OPTION) $<
$(OUT)/pages.o: pages.c pages.h memory.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/random.o: random.c random.h chacha.h util.h $(CONFIG_FILE) | $(OUT)
$(OUT)/random.o: random.c random.h chacha.h $(BOSC_EXTRAS) util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/util.o: util.c util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/memcpy.o: memcpy.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) -Wno-cast-align $(OUTPUT_OPTION) $<
$(OUT)/memccpy.o: memccpy.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) -Wno-cast-align $(OUTPUT_OPTION) $<
$(OUT)/memmove.o: memmove.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) -Wno-cast-align $(OUTPUT_OPTION) $<
$(OUT)/memset.o: memset.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) -Wno-cast-align $(OUTPUT_OPTION) $<
$(OUT)/swab.o: swab.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) -Wno-cast-align $(OUTPUT_OPTION) $<
$(OUT)/wmemset.o: wmemset.c musl.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
check: tidy
tidy:

View file

@ -1,4 +1,4 @@
# Hardened malloc
# hardened_malloc
* [Introduction](#introduction)
* [Dependencies](#dependencies)
@ -169,7 +169,7 @@ Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis
will not work when `AT_SECURE` is set such as with setuid binaries. It's also
generally not a recommended approach for production usage. The recommendation
is to enable it globally and make exceptions for performance critical cases by
running the application in a container / namespace without it enabled.
running the application in a container/namespace without it enabled.
Make sure to raise `vm.max_map_count` substantially too to accommodate the very
large number of guard pages created by hardened\_malloc. As an example, in
@ -255,7 +255,7 @@ The following boolean configuration options are available:
* `CONFIG_WRITE_AFTER_FREE_CHECK`: `true` (default) or `false` to control
sanity checking that new small allocations contain zeroed memory. This can
detect writes caused by a write-after-free vulnerability and mixes well with
the features for making memory reuse randomized / delayed. This has a
the features for making memory reuse randomized/delayed. This has a
performance cost scaling to the size of the allocation, which is usually
acceptable. This is not relevant to large allocations because they're always
a fresh memory mapping from the kernel.
@ -279,6 +279,9 @@ The following boolean configuration options are available:
hardware, which may become drastically lower in the future. Whether or not
this feature is enabled, the metadata is all contained within an isolated
memory region with high entropy random guard regions around it.
* `CONFIG_BLOCK_OPS_CHECK_SIZE`: `true` or `false` (default) to control
whether the length parameters of the memcpy/memccpy/memmove/memset block
operations and their wide variants are checked against approximate bounds
in order to mitigate buffer overflows.
The following integer configuration options are available:
@ -341,7 +344,7 @@ larger caches can substantially improves performance).
## Core design
The core design of the allocator is very simple / minimalist. The allocator is
The core design of the allocator is very simple/minimalist. The allocator is
exclusive to 64-bit platforms in order to take full advantage of the abundant
address space without being constrained by needing to keep the design
compatible with 32-bit.
@ -373,13 +376,13 @@ whether it's free, along with a separate bitmap for tracking allocations in the
quarantine. The slab metadata entries in the array have intrusive lists
threaded through them to track partial slabs (partially filled, and these are
the first choice for allocation), empty slabs (limited amount of cached free
memory) and free slabs (purged / memory protected).
memory) and free slabs (purged/memory protected).
Large allocations are tracked via a global hash table mapping their address to
their size and random guard size. They're simply memory mappings and get mapped
on allocation and then unmapped on free. Large allocations are the only dynamic
memory mappings made by the allocator, since the address space for allocator
state (including both small / large allocation metadata) and slab allocations
state (including both small/large allocation metadata) and slab allocations
is statically reserved.
This allocator is aimed at production usage, not aiding with finding and fixing
@ -390,7 +393,7 @@ messages. The design choices are based around minimizing overhead and
maximizing security which often leads to different decisions than a tool
attempting to find bugs. For example, it uses zero-based sanitization on free
and doesn't minimize slack space from size class rounding between the end of an
allocation and the canary / guard region. Zero-based filling has the least
allocation and the canary/guard region. Zero-based filling has the least
chance of uncovering latent bugs, but also the best chance of mitigating
vulnerabilities. The canary feature is primarily meant to act as padding
absorbing small overflows to render them harmless, so slack space is helpful
@ -424,11 +427,11 @@ was a bit less important and if a core goal was finding latent bugs.
* Top-level isolated regions for each arena
* Divided up into isolated inner regions for each size class
* High entropy random base for each size class region
* No deterministic / low entropy offsets between allocations with
* No deterministic/low entropy offsets between allocations with
different size classes
* Metadata is completely outside the slab allocation region
* No references to metadata within the slab allocation region
* No deterministic / low entropy offsets to metadata
* No deterministic/low entropy offsets to metadata
* Entire slab region starts out non-readable and non-writable
* Slabs beyond the cache limit are purged and become non-readable and
non-writable memory again
@ -649,7 +652,7 @@ other. Static assignment can also reduce memory usage since threads may have
varying usage of size classes.
When there's substantial allocation or deallocation pressure, the allocator
does end up calling into the kernel to purge / protect unused slabs by
does end up calling into the kernel to purge/protect unused slabs by
replacing them with fresh `PROT_NONE` regions along with unprotecting slabs
when partially filled and cached empty slabs are depleted. There will be
configuration over the amount of cached empty slabs, but it's not entirely a
@ -696,7 +699,7 @@ The secondary benefit of thread caches is being able to avoid the underlying
allocator implementation entirely for some allocations and deallocations when
they're mixed together rather than many allocations being done together or many
frees being done together. The value of this depends a lot on the application
and it's entirely unsuitable / incompatible with a hardened allocator since it
and it's entirely unsuitable/incompatible with a hardened allocator since it
bypasses all of the underlying security and would destroy much of the security
value.
@ -960,7 +963,7 @@ doesn't handle large allocations within the arenas, so it presents those in the
For example, with 4 arenas enabled, there will be a 5th arena in the statistics
for the large allocations.
The `nmalloc` / `ndalloc` fields are 64-bit integers tracking allocation and
The `nmalloc`/`ndalloc` fields are 64-bit integers tracking allocation and
deallocation count. These are defined as wrapping on overflow, per the jemalloc
implementation.

View file

@ -21,3 +21,4 @@ CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
CONFIG_N_ARENA := 4
CONFIG_STATS := false
CONFIG_SELF_INIT := true
CONFIG_BLOCK_OPS_CHECK_SIZE := false

View file

@ -21,3 +21,4 @@ CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
CONFIG_N_ARENA := 4
CONFIG_STATS := false
CONFIG_SELF_INIT := true
CONFIG_BLOCK_OPS_CHECK_SIZE := false

View file

@ -20,6 +20,10 @@
#include "random.h"
#include "util.h"
#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
#include "musl.h"
#endif
#ifdef USE_PKEY
#include <sys/mman.h>
#endif
@ -528,7 +532,7 @@ static void set_canary(UNUSED const struct slab_metadata *metadata, UNUSED void
}
#endif
memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
h_memcpy_internal((char *)p + size - canary_size, &metadata->canary_value, canary_size);
#endif
}
@ -541,7 +545,7 @@ static void check_canary(UNUSED const struct slab_metadata *metadata, UNUSED con
#endif
u64 canary_value;
memcpy(&canary_value, (const char *)p + size - canary_size, canary_size);
h_memcpy_internal(&canary_value, (const char *)p + size - canary_size, canary_size);
#ifdef HAS_ARM_MTE
if (unlikely(canary_value == 0)) {
@ -831,7 +835,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
#endif
if (ZERO_ON_FREE && !skip_zero) {
memset(p, 0, size - canary_size);
h_memset_internal(p, 0, size - canary_size);
}
}
@ -1502,7 +1506,7 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
total_size = adjust_size_for_canary(total_size);
void *p = alloc(total_size);
if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
memset(p, 0, total_size - canary_size);
h_memset_internal(p, 0, total_size - canary_size);
}
#ifdef HAS_ARM_MTE
// use an assert instead of adding a conditional to memset() above (freed memory is always
@ -1624,7 +1628,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
mutex_unlock(&ra->lock);
if (memory_remap_fixed(old, old_size, new, size)) {
memcpy(new, old, copy_size);
h_memcpy_internal(new, old, copy_size);
deallocate_pages(old, old_size, old_guard_size);
} else {
memory_unmap((char *)old - old_guard_size, old_guard_size);
@ -1646,7 +1650,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
if (copy_size > 0 && copy_size <= max_slab_size_class) {
copy_size -= canary_size;
}
memcpy(new, old_orig, copy_size);
h_memcpy_internal(new, old_orig, copy_size);
if (old_size <= max_slab_size_class) {
deallocate_small(old, NULL);
} else {
@ -1874,6 +1878,133 @@ EXPORT size_t h_malloc_object_size_fast(const void *p) {
return SIZE_MAX;
}
#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
// Interposed libc memcpy: rejects overlapping ranges and length arguments
// exceeding the allocator-known size of either buffer, then delegates to
// the bundled musl implementation.
EXPORT void *memcpy(void *restrict dst, const void *restrict src, size_t len) {
    // A zero-length or perfectly aliased copy has nothing to do.
    if (unlikely(len == 0 || src == dst)) {
        return dst;
    }
    // memcpy requires the ranges not to overlap at all.
    if (unlikely(src < (dst + len) && (src + len) > dst)) {
        fatal_error("memcpy overlap");
    }
    if (unlikely(malloc_object_size(src) < len)) {
        fatal_error("memcpy read overflow");
    }
    if (unlikely(malloc_object_size(dst) < len)) {
        fatal_error("memcpy buffer overflow");
    }
    return musl_memcpy(dst, src, len);
}
// Interposed libc memccpy: sanity checks the ranges against the
// allocator-known sizes before delegating to the bundled musl
// implementation.
// NOTE(review): for dst == src this returns dst, whereas libc memccpy
// returns NULL when `value` is absent from the first len bytes — confirm
// callers tolerate this deviation for the exact-alias case.
EXPORT void *memccpy(void *restrict dst, const void *restrict src, int value, size_t len) {
    if (unlikely(dst == src || len == 0)) {
        return dst;
    }
    if (unlikely(dst < (src + len) && (dst + len) > src)) {
        fatal_error("memccpy overlap");
    }
    // The read-overflow check is skipped when value == 0 — presumably the
    // copy is expected to stop at a NUL before running past src; TODO
    // confirm this exemption is intentional.
    if (unlikely(len > malloc_object_size(src) && value != 0)) {
        fatal_error("memccpy read overflow");
    }
    if (unlikely(len > malloc_object_size(dst))) {
        fatal_error("memccpy buffer overflow");
    }
    return musl_memccpy(dst, src, value, len);
}
// Interposed libc memmove: overlap is permitted, but the length still may
// not exceed the allocator-known size of either buffer.
EXPORT void *memmove(void *dst, const void *src, size_t len) {
    // A zero-length or perfectly aliased move has nothing to do.
    if (unlikely(len == 0 || src == dst)) {
        return dst;
    }
    if (unlikely(malloc_object_size(src) < len)) {
        fatal_error("memmove read overflow");
    }
    if (unlikely(malloc_object_size(dst) < len)) {
        fatal_error("memmove buffer overflow");
    }
    return musl_memmove(dst, src, len);
}
// Interposed mempcpy: same checks as memcpy (to which it delegates), but
// returns one past the last byte written instead of dst.
EXPORT void *mempcpy(void *restrict dst, const void *restrict src, size_t len) {
    return (char *)memcpy(dst, src, len) + len;
}
// Interposed libc memset: refuses to fill past the allocator-known size of
// the destination before delegating to the bundled musl implementation.
EXPORT void *memset(void *dst, int value, size_t len) {
    if (likely(len != 0)) {
        if (unlikely(malloc_object_size(dst) < len)) {
            fatal_error("memset buffer overflow");
        }
        return musl_memset(dst, value, len);
    }
    return dst;
}
// Legacy BSD bcopy: argument order is (source, destination) and overlap is
// allowed, so forward to the checked memmove interposer.
EXPORT void bcopy(const void *from, void *to, size_t n) {
    memmove(to, from, n);
}
// Interposed swab: rejects overlapping ranges and lengths exceeding the
// allocator-known size of either buffer before delegating to the bundled
// musl implementation. swab takes a signed length; non-positive lengths
// are no-ops.
EXPORT void swab(const void *restrict src, void *restrict dst, ssize_t len) {
    if (unlikely(len <= 0)) {
        return;
    }
    size_t length = len;
    if (unlikely(dst < (src + length) && (dst + length) > src)) {
        fatal_error("swab overlap");
    }
    if (unlikely(length > malloc_object_size(src))) {
        fatal_error("swab read overflow");
    }
    if (unlikely(length > malloc_object_size(dst))) {
        fatal_error("swab buffer overflow");
    }
    // `return <void expression>;` is a constraint violation in ISO C
    // (C11 6.8.6.4p1), so call the void function and fall off the end.
    musl_swab(src, dst, len);
}
// Interposed wmemcpy: wide variant of the checked memcpy. The length is in
// wchar_t units, so the size checks are performed on the byte count.
EXPORT wchar_t *wmemcpy(wchar_t *restrict dst, const wchar_t *restrict src, size_t len) {
    if (unlikely(len == 0 || src == dst)) {
        return dst;
    }
    // Overlap is forbidden; the comparison is in wchar_t units.
    if (unlikely(src < (dst + len) && (src + len) > dst)) {
        fatal_error("wmemcpy overlap");
    }
    const size_t byte_len = len * sizeof(wchar_t);
    if (unlikely(malloc_object_size(src) < byte_len)) {
        fatal_error("wmemcpy read overflow");
    }
    if (unlikely(malloc_object_size(dst) < byte_len)) {
        fatal_error("wmemcpy buffer overflow");
    }
    return (wchar_t *)musl_memcpy((char *)dst, (const char *)src, byte_len);
}
// Interposed wmemmove: wide variant of the checked memmove; overlap is
// permitted. The length is in wchar_t units, so the size checks are
// performed on the byte count.
EXPORT wchar_t *wmemmove(wchar_t *dst, const wchar_t *src, size_t len) {
    if (unlikely(len == 0 || src == dst)) {
        return dst;
    }
    const size_t byte_len = len * sizeof(wchar_t);
    if (unlikely(malloc_object_size(src) < byte_len)) {
        fatal_error("wmemmove read overflow");
    }
    if (unlikely(malloc_object_size(dst) < byte_len)) {
        fatal_error("wmemmove buffer overflow");
    }
    return (wchar_t *)musl_memmove((char *)dst, (const char *)src, byte_len);
}
// Interposed wmempcpy: delegates to the checked wmemcpy and returns one
// past the last wide character written instead of dst.
EXPORT wchar_t *wmempcpy(wchar_t *restrict dst, const wchar_t *restrict src, size_t len) {
    wchar_t *start = wmemcpy(dst, src, len);
    return start + len;
}
// Interposed wmemset: refuses to fill past the allocator-known size of the
// destination (length is in wchar_t units, checked as bytes) before
// delegating to the bundled musl implementation.
EXPORT wchar_t *wmemset(wchar_t *dst, wchar_t value, size_t len) {
    if (likely(len != 0)) {
        if (unlikely(malloc_object_size(dst) < len * sizeof(wchar_t))) {
            fatal_error("wmemset buffer overflow");
        }
        return musl_wmemset(dst, value, len);
    }
    return dst;
}
#endif /* CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE) */
EXPORT int h_mallopt(UNUSED int param, UNUSED int value) {
#ifdef __ANDROID__
if (param == M_PURGE) {

View file

@ -55,6 +55,27 @@ __attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_alig
void *h_aligned_alloc(size_t alignment, size_t size);
void h_free(void *ptr);
#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
void *memcpy(void *dst, const void *src, size_t len);
void *memccpy(void *dst, const void *src, int value, size_t len);
void *memmove(void *dst, const void *src, size_t len);
void *mempcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int value, size_t len);
void bcopy(const void *src, void *dst, size_t len);
void swab(const void *src, void *dst, ssize_t len);
wchar_t *wmemcpy(wchar_t *dst, const wchar_t *src, size_t len);
wchar_t *wmemmove(wchar_t *dst, const wchar_t *src, size_t len);
wchar_t *wmempcpy(wchar_t *dst, const wchar_t *src, size_t len);
wchar_t *wmemset(wchar_t *dst, wchar_t value, size_t len);
#define h_memcpy_internal musl_memcpy
#define h_memmove_internal musl_memmove
#define h_memset_internal musl_memset
#else
#define h_memcpy_internal memcpy
#define h_memmove_internal memmove
#define h_memset_internal memset
#endif
// POSIX
int h_posix_memalign(void **memptr, size_t alignment, size_t size);

38
memccpy.c Normal file
View file

@ -0,0 +1,38 @@
#include "musl.h"
/* Copied from musl libc version 1.2.5 licensed under the MIT license */
#include <string.h>
#include <stdint.h>
#include <limits.h>
#define ALIGN (sizeof(size_t)-1)
#define ONES ((size_t)-1/UCHAR_MAX)
#define HIGHS (ONES * (UCHAR_MAX/2+1))
#define HASZERO(x) (((x)-ONES) & ~(x) & HIGHS)
/*
 * Copy bytes from src to dest, stopping after the first occurrence of c
 * (inclusive) or after n bytes. Returns a pointer one past the copied c,
 * or NULL (0) when c was not found within the first n bytes.
 */
void *musl_memccpy(void *restrict dest, const void *restrict src, int c, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	c = (unsigned char)c;
#ifdef __GNUC__
	typedef size_t __attribute__((__may_alias__)) word;
	word *wd;
	const word *ws;
	if (((uintptr_t)s & ALIGN) == ((uintptr_t)d & ALIGN)) {
		/* Byte-copy until word aligned, stopping early if c is hit. */
		for (; ((uintptr_t)s & ALIGN) && n && (*d=*s)!=c; n--, s++, d++);
		if ((uintptr_t)s & ALIGN) goto tail;
		/* k is c replicated into every byte lane; HASZERO(*ws^k)
		 * detects a word containing c without per-byte inspection. */
		size_t k = ONES * c;
		wd=(void *)d; ws=(const void *)s;
		for (; n>=sizeof(size_t) && !HASZERO(*ws^k);
		       n-=sizeof(size_t), ws++, wd++) *wd = *ws;
		d=(void *)wd; s=(const void *)ws;
	}
#endif
	/* Finish (or perform the whole copy) one byte at a time. */
	for (; n && (*d=*s)!=c; n--, s++, d++);
tail:
	/* Nonzero n means the loop stopped because c was just copied. */
	if (n) return d+1;
	return 0;
}

132
memcpy.c Normal file
View file

@ -0,0 +1,132 @@
#include "musl.h"
/*
* Copied from musl libc version 1.2.5 licensed under the MIT license
*
* Christian Göttsche: Added const qualifiers to retain const correctness.
*/
#include <string.h>
#include <stdint.h>
#include <endian.h>
/*
 * Copy n bytes from src to dest (which must not overlap) and return dest.
 *
 * The GNU path moves 16 bytes per iteration through 32-bit accesses
 * aligned for the destination; when dest and src differ in alignment
 * mod 4, words read from src are recombined with shifts (LS/RS pick the
 * direction for the host endianness) so that stores stay aligned.
 */
void *musl_memcpy(void *restrict dest, const void *restrict src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

#ifdef __GNUC__

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LS >>
#define RS <<
#else
#define LS <<
#define RS >>
#endif

	typedef uint32_t __attribute__((__may_alias__)) u32;
	uint32_t w, x;

	/* Byte-copy until the source is 4-byte aligned. */
	for (; (uintptr_t)s % 4 && n; n--) *d++ = *s++;

	if ((uintptr_t)d % 4 == 0) {
		/* Fast path: both pointers now share 4-byte alignment. */
		for (; n>=16; s+=16, d+=16, n-=16) {
			*(u32 *)(d+0) = *(const u32 *)(s+0);
			*(u32 *)(d+4) = *(const u32 *)(s+4);
			*(u32 *)(d+8) = *(const u32 *)(s+8);
			*(u32 *)(d+12) = *(const u32 *)(s+12);
		}
		/* Drain the <16-byte remainder in power-of-two chunks. */
		if (n&8) {
			*(u32 *)(d+0) = *(const u32 *)(s+0);
			*(u32 *)(d+4) = *(const u32 *)(s+4);
			d += 8; s += 8;
		}
		if (n&4) {
			*(u32 *)(d+0) = *(const u32 *)(s+0);
			d += 4; s += 4;
		}
		if (n&2) {
			*d++ = *s++; *d++ = *s++;
		}
		if (n&1) {
			*d = *s;
		}
		return dest;
	}

	/* Misaligned destination: byte-copy up to dest alignment, then
	 * stitch each aligned store together from two shifted source
	 * words. Loop bounds (17/18/19) keep the trailing word read of
	 * each iteration inside the source buffer. */
	if (n >= 32) switch ((uintptr_t)d % 4) {
	case 1:
		w = *(const u32 *)s;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		n -= 3;
		for (; n>=17; s+=16, d+=16, n-=16) {
			x = *(const u32 *)(s+1);
			*(u32 *)(d+0) = (w LS 24) | (x RS 8);
			w = *(const u32 *)(s+5);
			*(u32 *)(d+4) = (x LS 24) | (w RS 8);
			x = *(const u32 *)(s+9);
			*(u32 *)(d+8) = (w LS 24) | (x RS 8);
			w = *(const u32 *)(s+13);
			*(u32 *)(d+12) = (x LS 24) | (w RS 8);
		}
		break;
	case 2:
		w = *(const u32 *)s;
		*d++ = *s++;
		*d++ = *s++;
		n -= 2;
		for (; n>=18; s+=16, d+=16, n-=16) {
			x = *(const u32 *)(s+2);
			*(u32 *)(d+0) = (w LS 16) | (x RS 16);
			w = *(const u32 *)(s+6);
			*(u32 *)(d+4) = (x LS 16) | (w RS 16);
			x = *(const u32 *)(s+10);
			*(u32 *)(d+8) = (w LS 16) | (x RS 16);
			w = *(const u32 *)(s+14);
			*(u32 *)(d+12) = (x LS 16) | (w RS 16);
		}
		break;
	case 3:
		w = *(const u32 *)s;
		*d++ = *s++;
		n -= 1;
		for (; n>=19; s+=16, d+=16, n-=16) {
			x = *(const u32 *)(s+3);
			*(u32 *)(d+0) = (w LS 8) | (x RS 24);
			w = *(const u32 *)(s+7);
			*(u32 *)(d+4) = (x LS 8) | (w RS 24);
			x = *(const u32 *)(s+11);
			*(u32 *)(d+8) = (w LS 8) | (x RS 24);
			w = *(const u32 *)(s+15);
			*(u32 *)(d+12) = (x LS 8) | (w RS 24);
		}
		break;
	}

	/* Byte-copy whatever remains (always < 32 bytes here). */
	if (n&16) {
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
	}
	if (n&8) {
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
	}
	if (n&4) {
		*d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++;
	}
	if (n&2) {
		*d++ = *s++; *d++ = *s++;
	}
	if (n&1) {
		*d = *s;
	}
	return dest;
#endif

	/* Portable fallback when no GNU extensions are available. */
	for (; n; n--) *d++ = *s++;
	return dest;
}

50
memmove.c Normal file
View file

@ -0,0 +1,50 @@
#include "musl.h"
/*
* Copied from musl libc version 1.2.5 licensed under the MIT license
*
* Christian Göttsche: Added const qualifiers to retain const correctness.
*/
#include <string.h>
#include <stdint.h>
#ifdef __GNUC__
typedef __attribute__((__may_alias__)) size_t WT;
#define WS (sizeof(WT))
#endif
/*
 * Copy n bytes between possibly-overlapping regions and return dest.
 */
void *musl_memmove(void *dest, const void *src, size_t n)
{
	char *d = dest;
	const char *s = src;

	if (d==s) return d;
	/* Distance test with unsigned wraparound: when the regions are at
	 * least n bytes apart in either direction they cannot overlap, so
	 * the plain forward memcpy is safe. */
	if ((uintptr_t)s-(uintptr_t)d-n <= -2*n) return musl_memcpy(d, s, n);

	if (d<s) {
		/* dest below src: copy forward. */
#ifdef __GNUC__
		/* Word-at-a-time path when both pointers share alignment. */
		if ((uintptr_t)s % WS == (uintptr_t)d % WS) {
			while ((uintptr_t)d % WS) {
				if (!n--) return dest;
				*d++ = *s++;
			}
			for (; n>=WS; n-=WS, d+=WS, s+=WS) *(WT *)d = *(const WT *)s;
		}
#endif
		for (; n; n--) *d++ = *s++;
	} else {
		/* dest above src: copy backward so the overlapping tail of
		 * src is read before it is overwritten. */
#ifdef __GNUC__
		if ((uintptr_t)s % WS == (uintptr_t)d % WS) {
			while ((uintptr_t)(d+n) % WS) {
				if (!n--) return dest;
				d[n] = s[n];
			}
			while (n>=WS) n-=WS, *(WT *)(d+n) = *(const WT *)(s+n);
		}
#endif
		while (n) n--, d[n] = s[n];
	}
	return dest;
}

94
memset.c Normal file
View file

@ -0,0 +1,94 @@
#include "musl.h"
/* Copied from musl libc version 1.2.5 licensed under the MIT license */
#include <string.h>
#include <stdint.h>
/*
 * Fill the first n bytes of dest with the byte value c and return dest.
 */
void *musl_memset(void *dest, int c, size_t n)
{
	unsigned char *s = dest;
	size_t k;

	/* Fill head and tail with minimal branching. Each
	 * conditional ensures that all the subsequently used
	 * offsets are well-defined and in the dest region. */

	if (!n) return dest;
	s[0] = c;
	s[n-1] = c;
	if (n <= 2) return dest;
	s[1] = c;
	s[2] = c;
	s[n-2] = c;
	s[n-3] = c;
	if (n <= 6) return dest;
	s[3] = c;
	s[n-4] = c;
	if (n <= 8) return dest;

	/* Advance pointer to align it at a 4-byte boundary,
	 * and truncate n to a multiple of 4. The previous code
	 * already took care of any head/tail that get cut off
	 * by the alignment. */

	k = -(uintptr_t)s & 3;
	s += k;
	n -= k;
	n &= -4;

#ifdef __GNUC__
	typedef uint32_t __attribute__((__may_alias__)) u32;
	typedef uint64_t __attribute__((__may_alias__)) u64;

	/* c32 is the fill byte replicated into all four byte lanes. */
	u32 c32 = ((u32)-1)/255 * (unsigned char)c;

	/* In preparation to copy 32 bytes at a time, aligned on
	 * an 8-byte boundary, fill head/tail up to 28 bytes each.
	 * As in the initial byte-based head/tail fill, each
	 * conditional below ensures that the subsequent offsets
	 * are valid (e.g. !(n<=24) implies n>=28). */

	*(u32 *)(s+0) = c32;
	*(u32 *)(s+n-4) = c32;
	if (n <= 8) return dest;
	*(u32 *)(s+4) = c32;
	*(u32 *)(s+8) = c32;
	*(u32 *)(s+n-12) = c32;
	*(u32 *)(s+n-8) = c32;
	if (n <= 24) return dest;
	*(u32 *)(s+12) = c32;
	*(u32 *)(s+16) = c32;
	*(u32 *)(s+20) = c32;
	*(u32 *)(s+24) = c32;
	*(u32 *)(s+n-28) = c32;
	*(u32 *)(s+n-24) = c32;
	*(u32 *)(s+n-20) = c32;
	*(u32 *)(s+n-16) = c32;

	/* Align to a multiple of 8 so we can fill 64 bits at a time,
	 * and avoid writing the same bytes twice as much as is
	 * practical without introducing additional branching. */

	k = 24 + ((uintptr_t)s & 4);
	s += k;
	n -= k;

	/* If this loop is reached, 28 tail bytes have already been
	 * filled, so any remainder when n drops below 32 can be
	 * safely ignored. */

	u64 c64 = c32 | ((u64)c32 << 32);
	for (; n >= 32; n-=32, s+=32) {
		*(u64 *)(s+0) = c64;
		*(u64 *)(s+8) = c64;
		*(u64 *)(s+16) = c64;
		*(u64 *)(s+24) = c64;
	}
#else
	/* Pure C fallback with no aliasing violations. */
	for (; n; n--, s++) *s = c;
#endif

	return dest;
}

11
musl.h Normal file
View file

@ -0,0 +1,11 @@
#pragma once

// Declarations for the bundled musl libc block operation implementations
// (memcpy.c, memccpy.c, memmove.c, memset.c, swab.c, wmemset.c). The musl_
// prefix keeps them distinct from the interposed libc symbols that wrap
// them with size checks.

#include <stddef.h>
#include <sys/types.h>

void *musl_memcpy(void *dst, const void *src, size_t len);
void *musl_memccpy(void *restrict dest, const void *restrict src, int c, size_t n);
void *musl_memmove(void *dst, const void *src, size_t len);
void *musl_memset(void *dst, int value, size_t len);
void musl_swab(const void *_src, void *_dest, ssize_t n);
wchar_t *musl_wmemset(wchar_t *dst, wchar_t value, size_t len);

View file

@ -5,6 +5,10 @@
#include "random.h"
#include "util.h"
#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
#include "musl.h"
#endif
#include <sys/random.h>
static void get_random_seed(void *buf, size_t size) {
@ -65,7 +69,7 @@ void get_random_bytes(struct random_state *state, void *buf, size_t size) {
size_t remaining = RANDOM_CACHE_SIZE - state->index;
size_t copy_size = min(size, remaining);
memcpy(buf, state->cache + state->index, copy_size);
h_memcpy_internal(buf, state->cache + state->index, copy_size);
state->index += copy_size;
buf = (char *)buf + copy_size;
@ -79,7 +83,7 @@ u16 get_random_u16(struct random_state *state) {
if (remaining < sizeof(value)) {
refill(state);
}
memcpy(&value, state->cache + state->index, sizeof(value));
h_memcpy_internal(&value, state->cache + state->index, sizeof(value));
state->index += sizeof(value);
return value;
}
@ -106,7 +110,7 @@ u64 get_random_u64(struct random_state *state) {
if (remaining < sizeof(value)) {
refill(state);
}
memcpy(&value, state->cache + state->index, sizeof(value));
h_memcpy_internal(&value, state->cache + state->index, sizeof(value));
state->index += sizeof(value);
return value;
}

View file

@ -22,4 +22,10 @@ u16 get_random_u16_uniform(struct random_state *state, u16 bound);
u64 get_random_u64(struct random_state *state);
u64 get_random_u64_uniform(struct random_state *state, u64 bound);
#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
#define h_memcpy_internal musl_memcpy
#else
#define h_memcpy_internal memcpy
#endif
#endif

17
swab.c Normal file
View file

@ -0,0 +1,17 @@
#include "musl.h"
/* Copied from musl libc version 1.2.5 licensed under the MIT license */
#include <unistd.h>
/*
 * Swap each adjacent pair of bytes while copying from _src to _dest; when
 * n is odd (or non-positive) the trailing byte is not copied.
 */
void musl_swab(const void *restrict _src, void *restrict _dest, ssize_t n)
{
	const char *from = _src;
	char *to = _dest;

	while (n > 1) {
		char even = from[0];
		char odd = from[1];
		to[0] = odd;
		to[1] = even;
		from += 2;
		to += 2;
		n -= 2;
	}
}

26
test/.gitignore vendored
View file

@ -41,4 +41,30 @@ overflow_small_8_byte
uninitialized_read_large
uninitialized_read_small
realloc_init
memcpy_buffer_overflow
memcpy_read_overflow
memcpy_valid_same
memcpy_valid_mismatched
memccpy_buffer_overflow
memccpy_read_overflow
memccpy_valid_same
memccpy_valid_mismatched
memmove_buffer_overflow
memmove_read_overflow
memmove_valid_same
memmove_valid_mismatched
memset_buffer_overflow
memset_valid_same
memset_valid_mismatched
wmemcpy_buffer_overflow
wmemcpy_read_overflow
wmemcpy_valid_same
wmemcpy_valid_mismatched
wmemmove_buffer_overflow
wmemmove_read_overflow
wmemmove_valid_same
wmemmove_valid_mismatched
wmemset_buffer_overflow
wmemset_valid_same
wmemset_valid_mismatched
__pycache__/

View file

@ -67,7 +67,33 @@ EXECUTABLES := \
invalid_malloc_object_size_small \
invalid_malloc_object_size_small_quarantine \
impossibly_large_malloc \
realloc_init
realloc_init \
memcpy_buffer_overflow \
memcpy_read_overflow \
memcpy_valid_same \
memcpy_valid_mismatched \
memccpy_buffer_overflow \
memccpy_read_overflow \
memccpy_valid_same \
memccpy_valid_mismatched \
memmove_buffer_overflow \
memmove_read_overflow \
memmove_valid_same \
memmove_valid_mismatched \
memset_buffer_overflow \
memset_valid_same \
memset_valid_mismatched \
wmemcpy_buffer_overflow \
wmemcpy_read_overflow \
wmemcpy_valid_same \
wmemcpy_valid_mismatched \
wmemmove_buffer_overflow \
wmemmove_read_overflow \
wmemmove_valid_same \
wmemmove_valid_mismatched \
wmemset_buffer_overflow \
wmemset_valid_same \
wmemset_valid_mismatched
all: $(EXECUTABLES)

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// memccpy buffer overflow test: a 32-byte copy into a 16-byte allocation
// must make the size-checked memccpy abort; reaching the final return
// means the overflow went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(32);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memccpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 32);
    memccpy(firstbuffer, secondbuffer, 'b', 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// memccpy read overflow test: a 32-byte read from a 16-byte allocation
// must make the size-checked memccpy abort; reaching the final return
// means the overflow went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memccpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memccpy(firstbuffer, secondbuffer, 'b', 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// Valid memccpy test: an in-bounds 16-byte copy into a larger (32-byte)
// destination must complete without the allocator aborting.
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memccpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memccpy(firstbuffer, secondbuffer, 'b', 16);
    return 0;
}

15
test/memccpy_valid_same.c Normal file
View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// Valid memccpy test: an in-bounds 16-byte copy between equally sized
// allocations must complete without the allocator aborting.
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(16);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memccpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memccpy(firstbuffer, secondbuffer, 'b', 16);
    return 0;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// memcpy buffer overflow test: a 32-byte copy into a 16-byte allocation
// must make the size-checked memcpy abort; reaching the final return
// means the overflow went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(32);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memcpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 32);
    memcpy(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
// memcpy read overflow test: a 32-byte read from a 16-byte allocation
// must make the size-checked memcpy abort; reaching the final return
// means the overflow went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Bail out if either allocation failed; the original `&&` check only
    // bailed when both failed and could pass NULL to memset/memcpy.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memcpy(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// In-bounds memcpy of 16 bytes from a 16-byte source into a larger
// 32-byte destination; must complete normally (exit status 0).
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memcpy(firstbuffer, secondbuffer, 16);
    return 0;
}

15
test/memcpy_valid_same.c Normal file
View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// In-bounds memcpy between two equal-sized 16-byte allocations;
// must complete normally (exit status 0).
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(16);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memcpy(firstbuffer, secondbuffer, 16);
    return 0;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// memmove 32 bytes into a 16-byte allocation: a write overflow. The
// allocator's size check is expected to abort, so reaching the final
// return (exit status 1) means the overflow went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(32);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 32);
    memmove(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// memmove 32 bytes out of a 16-byte source allocation: a read overflow.
// The allocator's size check is expected to abort; reaching the final
// return (exit status 1) means the over-read went undetected.
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memmove(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// In-bounds memmove of 16 bytes from a 16-byte source into a larger
// 32-byte destination; must complete normally (exit status 0).
OPTNONE int main(void) {
    char *firstbuffer = malloc(32);
    char *secondbuffer = malloc(16);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memmove(firstbuffer, secondbuffer, 16);
    return 0;
}

15
test/memmove_valid_same.c Normal file
View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// In-bounds memmove between two equal-sized 16-byte allocations;
// must complete normally (exit status 0).
OPTNONE int main(void) {
    char *firstbuffer = malloc(16);
    char *secondbuffer = malloc(16);
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    memset(secondbuffer, 'a', 16);
    memmove(firstbuffer, secondbuffer, 16);
    return 0;
}

View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// memset 32 bytes into a 16-byte allocation: a write overflow. The
// allocator's size check is expected to abort, so reaching the final
// return (exit status 1) means the overflow went undetected.
OPTNONE int main(void) {
    char *buf = malloc(16);
    if (buf == NULL) {
        return 1;
    }
    memset(buf, 'a', 32);
    return 1;
}

View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// In-bounds memset of 16 bytes into a larger 32-byte allocation;
// must complete normally (exit status 0).
OPTNONE int main(void) {
    char *buf = malloc(32);
    if (buf == NULL) {
        return 1;
    }
    memset(buf, 'a', 16);
    return 0;
}

13
test/memset_valid_same.c Normal file
View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

// memset exactly filling a 16-byte allocation; must complete
// normally (exit status 0).
OPTNONE int main(void) {
    char *buf = malloc(16);
    if (buf == NULL) {
        return 1;
    }
    memset(buf, 'a', 16);
    return 0;
}

View file

@ -238,5 +238,160 @@ class TestSimpleMemoryCorruption(unittest.TestCase):
"realloc_init")
self.assertEqual(returncode, 0)
#def test_memcpy_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memcpy_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memcpy buffer overflow\n")
#def test_memcpy_read_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memcpy_read_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memcpy read overflow\n")
def test_memcpy_valid_same(self):
    # Exit status 0: the in-bounds copy completed without an abort.
    returncode = self.run_test("memcpy_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_memcpy_valid_mismatched(self):
    # Exit status 0: the in-bounds copy completed without an abort.
    returncode = self.run_test("memcpy_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
#def test_memccpy_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memccpy_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memccpy buffer overflow\n")
#def test_memccpy_read_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memccpy_read_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memccpy read overflow\n")
def test_memccpy_valid_same(self):
    # Exit status 0: the in-bounds copy completed without an abort.
    returncode = self.run_test("memccpy_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_memccpy_valid_mismatched(self):
    # Exit status 0: the in-bounds copy completed without an abort.
    returncode = self.run_test("memccpy_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
#def test_memmove_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memmove_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memmove buffer overflow\n")
#def test_memmove_read_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memmove_read_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memmove read overflow\n")
def test_memmove_valid_same(self):
    # Exit status 0: the in-bounds move completed without an abort.
    returncode = self.run_test("memmove_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_memmove_valid_mismatched(self):
    # Exit status 0: the in-bounds move completed without an abort.
    returncode = self.run_test("memmove_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
#def test_memset_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "memset_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: memset buffer overflow\n")
#def test_wmemcpy_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "wmemcpy_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: wmemcpy buffer overflow\n")
#def test_wmemcpy_read_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "wmemcpy_read_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: wmemcpy read overflow\n")
def test_wmemcpy_valid_same(self):
    # Exit status 0: the in-bounds wide copy completed without an abort.
    returncode = self.run_test("wmemcpy_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_wmemcpy_valid_mismatched(self):
    # Exit status 0: the in-bounds wide copy completed without an abort.
    returncode = self.run_test("wmemcpy_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
#def test_wmemmove_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "wmemmove_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: wmemmove buffer overflow\n")
#def test_wmemmove_read_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "wmemmove_read_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: wmemmove read overflow\n")
def test_wmemmove_valid_same(self):
    # Exit status 0: the in-bounds wide move completed without an abort.
    returncode = self.run_test("wmemmove_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_wmemmove_valid_mismatched(self):
    # Exit status 0: the in-bounds wide move completed without an abort.
    returncode = self.run_test("wmemmove_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
#def test_wmemset_buffer_overflow(self):
# _stdout, stderr, returncode = self.run_test(
# "wmemset_buffer_overflow")
# self.assertEqual(returncode, -6)
# self.assertEqual(stderr.decode(
# "utf-8"), "fatal allocator error: wmemset buffer overflow\n")
def test_wmemset_valid_same(self):
    # Exit status 0: the in-bounds wide fill completed without an abort.
    returncode = self.run_test("wmemset_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_wmemset_valid_mismatched(self):
    # Exit status 0: the in-bounds wide fill completed without an abort.
    returncode = self.run_test("wmemset_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
def test_memset_valid_same(self):
    # Exit status 0: the in-bounds fill completed without an abort.
    returncode = self.run_test("memset_valid_same")[2]
    self.assertEqual(returncode, 0)
def test_memset_valid_mismatched(self):
    # Exit status 0: the in-bounds fill completed without an abort.
    returncode = self.run_test("memset_valid_mismatched")[2]
    self.assertEqual(returncode, 0)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemcpy 32 wide chars into a 16-wide-char allocation: a write overflow.
// The allocator's size check is expected to abort, so reaching the final
// return (exit status 1) means the overflow went undetected.
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(16 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(32 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 32);
    wmemcpy(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemcpy 32 wide chars out of a 16-wide-char source: a read overflow.
// The allocator's size check is expected to abort; reaching the final
// return (exit status 1) means the over-read went undetected.
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(32 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemcpy(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// In-bounds wmemcpy of 16 wide chars from a 16-element source into a
// larger 32-element destination; must complete normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(32 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemcpy(firstbuffer, secondbuffer, 16);
    return 0;
}

15
test/wmemcpy_valid_same.c Normal file
View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// In-bounds wmemcpy between two equal-sized 16-wide-char allocations;
// must complete normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(16 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemcpy(firstbuffer, secondbuffer, 16);
    return 0;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemmove 32 wide chars into a 16-wide-char allocation: a write overflow.
// The allocator's size check is expected to abort, so reaching the final
// return (exit status 1) means the overflow went undetected.
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(16 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(32 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 32);
    wmemmove(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemmove 32 wide chars out of a 16-wide-char source: a read overflow.
// The allocator's size check is expected to abort; reaching the final
// return (exit status 1) means the over-read went undetected.
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(32 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemmove(firstbuffer, secondbuffer, 32);
    return 1;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// In-bounds wmemmove of 16 wide chars from a 16-element source into a
// larger 32-element destination; must complete normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(32 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemmove(firstbuffer, secondbuffer, 16);
    return 0;
}

View file

@ -0,0 +1,15 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// In-bounds wmemmove between two equal-sized 16-wide-char allocations;
// must complete normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *firstbuffer = malloc(16 * sizeof(wchar_t));
    wchar_t *secondbuffer = malloc(16 * sizeof(wchar_t));
    // Fixed: was `&&`, which only bailed out when BOTH allocations failed
    // and dereferenced NULL when just one of them did.
    if (!firstbuffer || !secondbuffer) {
        return 1;
    }
    wmemset(secondbuffer, L'\U0001F642', 16);
    wmemmove(firstbuffer, secondbuffer, 16);
    return 0;
}

View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemset 32 wide chars into a 16-wide-char allocation: a write overflow.
// The allocator's size check is expected to abort, so reaching the final
// return (exit status 1) means the overflow went undetected.
OPTNONE int main(void) {
    wchar_t *buf = malloc(16 * sizeof(wchar_t));
    if (buf == NULL) {
        return 1;
    }
    wmemset(buf, L'\U0001F642', 32);
    return 1;
}

View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// In-bounds wmemset of 16 wide chars into a larger 32-element
// allocation; must complete normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *buf = malloc(32 * sizeof(wchar_t));
    if (buf == NULL) {
        return 1;
    }
    wmemset(buf, L'\U0001F642', 16);
    return 0;
}

13
test/wmemset_valid_same.c Normal file
View file

@ -0,0 +1,13 @@
#include <stdlib.h>
#include <wchar.h>

#include "test_util.h"

// wmemset exactly filling a 16-wide-char allocation; must complete
// normally (exit status 0).
OPTNONE int main(void) {
    wchar_t *buf = malloc(16 * sizeof(wchar_t));
    if (buf == NULL) {
        return 1;
    }
    wmemset(buf, L'\U0001F642', 16);
    return 0;
}

12
wmemset.c Normal file
View file

@ -0,0 +1,12 @@
#include "musl.h"

/* Copied from musl libc version 1.2.5 licensed under the MIT license */
#include <wchar.h>

/* Fill the first n wide characters of d with c and return d.
 * Vendored musl reference implementation; intentionally kept
 * token-identical to upstream — do not restyle or "fix". */
wchar_t *musl_wmemset(wchar_t *d, wchar_t c, size_t n)
{
	wchar_t *ret = d;
	while (n--) *d++ = c;
	return ret;
}