Implement use-after-free detection using junk and stash.

On deallocation, sampled pointers (selected by their special alignment) are
junk-filled and stashed into the tcache, which prevents immediate reuse.  The
expectation is that a read-after-free sees the junk pattern and is corrupted
or stopped by it, while a write-after-free is detected when the stashed
pointers are flushed and their contents verified.
Author:  Qi Wang, 2021-10-18 17:33:15 -07:00 (committed by Qi Wang)
Parent:  06aac61c4b
Commit:  b75822bc6e
22 changed files with 793 additions and 42 deletions

File: Makefile.in

@@ -284,6 +284,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/thread_event.c \
$(srcroot)test/unit/ticker.c \
$(srcroot)test/unit/tsd.c \
$(srcroot)test/unit/uaf.c \
$(srcroot)test/unit/witness.c \
$(srcroot)test/unit/zero.c \
$(srcroot)test/unit/zero_realloc_abort.c \

File: configure.ac

@@ -1564,6 +1564,23 @@ if test "x$enable_opt_size_checks" = "x1" ; then
fi
AC_SUBST([enable_opt_size_checks])
dnl Do not check for use-after-free by default.
AC_ARG_ENABLE([uaf-detection],
[AS_HELP_STRING([--enable-uaf-detection],
[Allow sampled junk-filling on deallocation to detect use-after-free])],
[if test "x$enable_uaf_detection" = "xno" ; then
enable_uaf_detection="0"
else
enable_uaf_detection="1"
fi
],
[enable_uaf_detection="0"]
)
if test "x$enable_uaf_detection" = "x1" ; then
AC_DEFINE([JEMALLOC_UAF_DETECTION], [ ])
fi
AC_SUBST([enable_uaf_detection])
JE_COMPILABLE([a program using __builtin_unreachable], [
void foo (void) {
__builtin_unreachable();
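
A usage note (not from the patch itself): configuring with --enable-uaf-detection, or building with --enable-debug given the config_uaf_detection definition in jemalloc_preamble.h.in below, only compiles the machinery in. Sampling stays off until the runtime option lg_san_uaf_align is also set, e.g. MALLOC_CONF="lg_san_uaf_align:12" as in test/unit/uaf.sh at the end of this commit.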

File: include/jemalloc/internal/cache_bin.h

@@ -98,7 +98,7 @@ struct cache_bin_s {
* when the array is nonempty -- this is in the array).
*
* Recall that since the stack grows down, this is the lowest address in
- * the array.
+ * the array.  Only adjusted when stashing items.
*/
uint16_t low_bits_full;
@@ -107,7 +107,7 @@ struct cache_bin_s {
* is empty.
*
* The stack grows down -- this is one past the highest address in the
- * array.
+ * array.  Immutable after initialization.
*/
uint16_t low_bits_empty;
};
@@ -136,6 +136,26 @@ cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
descriptor->bins = bins;
}
JEMALLOC_ALWAYS_INLINE bool
cache_bin_nonfast_aligned(const void *ptr) {
if (!config_uaf_detection) {
return false;
}
/*
* Currently we use alignment to decide which pointer to junk & stash on
* dealloc (for catching use-after-free). In some common cases a
* page-aligned check is needed already (sdalloc w/ config_prof), so we
* are getting it more or less for free -- no added instructions on
* free_fastpath.
*
* Another way of deciding which pointer to sample, is adding another
* thread_event to pick one every N bytes. That also adds no cost on
* the fastpath, however it will tend to pick large allocations which is
* not the desired behavior.
*/
return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
}
/* Returns ncached_max: Upper limit on ncached. */
static inline cache_bin_sz_t
cache_bin_info_ncached_max(cache_bin_info_t *info) {
@@ -232,6 +252,20 @@ cache_bin_empty_position_get(cache_bin_t *bin) {
return ret;
}
/*
* Internal.
*
* A pointer to the position with the lowest address of the backing array.
*/
static inline void **
cache_bin_full_position_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
assert(ret <= bin->stack_head);
return ret;
}
/*
* As the name implies. This is important since it's not correct to try to
* batch fill a nonempty cache bin.
@@ -359,13 +393,17 @@ cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
return n;
}
JEMALLOC_ALWAYS_INLINE bool
cache_bin_full(cache_bin_t *bin) {
return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
}
/*
* Free an object into the given bin. Fails only if the bin is full.
*/
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
- uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
- if (unlikely(low_bits == bin->low_bits_full)) {
+ if (unlikely(cache_bin_full(bin))) {
return false;
}
@@ -377,7 +415,39 @@ cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
return true;
}
/* Returns false if failed to stash (i.e. bin is full). */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_stash(cache_bin_t *bin, void *ptr) {
if (cache_bin_full(bin)) {
return false;
}
/* Stash at the full position, in the [full, head) range. */
uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
/* Wraparound handled as well. */
uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head);
*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
assert(!cache_bin_full(bin));
bin->low_bits_full += sizeof(void *);
cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
return true;
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
void **full = cache_bin_full_position_get(bin, info);
uint16_t nstashed = cache_bin_diff(bin, (uint16_t)(uintptr_t)full,
bin->low_bits_full) / sizeof(void *);
assert(nstashed <= ncached_max);
return nstashed;
}
/*
* Filling and flushing are done in batch, on arrays of void *s. For filling,
* the arrays go forward, and can be accessed with ordinary array arithmetic.
* For flushing, we work from the end backwards, and so need to use special
@@ -463,6 +533,27 @@ cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_low_water_adjust(bin);
}
static inline void
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nstashed) {
assert(nstashed > 0);
assert(cache_bin_nstashed_get(bin, info) == nstashed);
void **full = cache_bin_full_position_get(bin, info);
arr->ptr = full;
assert(*arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
void **full = cache_bin_full_position_get(bin, info);
/* Reset the bin local full position. */
bin->low_bits_full = (uint16_t)(uintptr_t)full;
assert(cache_bin_nstashed_get(bin, info) == 0);
}
/*
* Initialize a cache_bin_info to represent up to the given number of items in
* the cache_bins it is associated with.
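
To make the two-ended bin layout concrete, here is a small standalone toy
model (not jemalloc code; the slot count and pointer values are made up) of
how cached items grow down from the empty end while stashed items grow up
from the full end, mirroring cache_bin_dalloc_easy() and cache_bin_stash()
above:

#include <stdio.h>

#define NSLOTS 8	/* stand-in for ncached_max */

int main(void) {
	void *slots[NSLOTS] = {0};
	int head = NSLOTS;	/* like stack_head: one past the last cached item */
	int full = 0;		/* like low_bits_full: next stash slot from the low end */

	/* Two ordinary frees: the head moves down (cache_bin_dalloc_easy). */
	slots[--head] = (void *)0x1000;
	slots[--head] = (void *)0x2000;
	/* One sampled free: junk-filled elsewhere, then stashed at the full
	 * end (cache_bin_stash).  The bin is full once head == full. */
	slots[full++] = (void *)0x3000;

	printf("cached=%d stashed=%d free=%d\n",
	    NSLOTS - head, full, head - full);	/* cached=2 stashed=1 free=5 */
	return 0;
}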

File: include/jemalloc/internal/jemalloc_internal_defs.h.in

@@ -415,6 +415,9 @@
/* Performs additional size checks when defined. */
#undef JEMALLOC_OPT_SIZE_CHECKS
/* Allows sampled junk and stash for checking use-after-free when defined. */
#undef JEMALLOC_UAF_DETECTION
/* Darwin VM_MAKE_TAG support */
#undef JEMALLOC_HAVE_VM_MAKE_TAG

File: include/jemalloc/internal/jemalloc_internal_externs.h

@@ -35,6 +35,9 @@ extern const char *zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;
/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
extern uintptr_t san_cache_bin_nonfast_mask;
/* Number of CPUs. */
extern unsigned ncpus;

File: include/jemalloc/internal/jemalloc_preamble.h.in

@@ -198,6 +198,14 @@ static const bool config_opt_size_checks =
#endif
;
static const bool config_uaf_detection =
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
true
#else
false
#endif
;
/* Whether or not the C++ extensions are enabled. */
static const bool config_enable_cxx =
#ifdef JEMALLOC_ENABLE_CXX

File: include/jemalloc/internal/san.h

@@ -10,9 +10,16 @@
#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)
static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;
/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;
/* -1 means disabled, i.e. never check for use-after-free. */
extern ssize_t opt_lg_san_uaf_align;
void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right, bool remap);
@@ -24,7 +31,10 @@ void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
*/
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, emap_t *emap);
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);
static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
@@ -121,4 +131,62 @@ san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
}
}
static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
void **last) {
size_t ptr_sz = sizeof(void *);
*first = ptr;
*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
assert(*first != *mid || usize == ptr_sz);
assert((uintptr_t)*first <= (uintptr_t)*mid);
/*
* When usize > 32K, the gap between requested_size and usize might be
* greater than 4K -- this means the last write may access an
* likely-untouched page (default settings w/ 4K pages). However by
* default the tcache only goes up to the 32K size class, and is usually
* tuned lower instead of higher, which makes it less of a concern.
*/
*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
assert(*first != *last || usize == ptr_sz);
assert(*mid != *last || usize <= ptr_sz * 2);
assert((uintptr_t)*mid <= (uintptr_t)*last);
}
static inline bool
san_junk_ptr_should_slow(void) {
/*
* The latter condition (pointer size greater than the min size class)
* is not expected -- fall back to the slow path for simplicity.
*/
return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
}
static inline void
san_junk_ptr(void *ptr, size_t usize) {
if (san_junk_ptr_should_slow()) {
memset(ptr, (char)uaf_detect_junk, usize);
return;
}
void *first, *mid, *last;
san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
*(uintptr_t *)first = uaf_detect_junk;
*(uintptr_t *)mid = uaf_detect_junk;
*(uintptr_t *)last = uaf_detect_junk;
}
static inline bool
san_uaf_detection_enabled(void) {
bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
if (config_uaf_detection && ret) {
assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
opt_lg_san_uaf_align) - 1);
}
return ret;
}
#endif /* JEMALLOC_INTERNAL_GUARD_H */
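
For a concrete feel of the three-point junking in san_junk_ptr_locations()
above: with 8-byte pointers and usize = 32, the junked offsets are 0, 16
((32 >> 1) & ~7) and 24 (32 - 8). A tiny standalone check of that arithmetic
(illustrative only; it works on offsets rather than real heap pointers):

#include <stdio.h>
#include <stddef.h>

/* Same offset arithmetic as san_junk_ptr_locations(). */
static void
junk_offsets(size_t usize, size_t *first, size_t *mid, size_t *last) {
	size_t ptr_sz = sizeof(void *);
	*first = 0;
	*mid = (usize >> 1) & ~(ptr_sz - 1);	/* middle, rounded down to ptr_sz */
	*last = usize - ptr_sz;			/* last pointer-sized slot */
}

int main(void) {
	size_t first, mid, last;
	junk_offsets(32, &first, &mid, &last);
	printf("%zu %zu %zu\n", first, mid, last);	/* 0 16 24 on LP64 */
	return 0;
}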

File: include/jemalloc/internal/tcache_externs.h

@@ -42,6 +42,8 @@ void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
szind_t binind, bool is_small);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);

File: include/jemalloc/internal/tcache_inlines.h

@@ -3,6 +3,7 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h" #include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h" #include "jemalloc/internal/sz.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
@ -61,6 +62,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
return arena_malloc_hard(tsd_tsdn(tsd), arena, size, return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
binind, zero); binind, zero);
} }
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ true);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
bin, binind, &tcache_hard_success);
@@ -100,6 +103,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL)) {
return NULL;
}
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ false);
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
@@ -126,6 +131,21 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
cache_bin_t *bin = &tcache->bins[binind];
/*
* Not marking the branch unlikely because this is past free_fastpath()
* (which handles the most common cases), i.e. at this point it's often
* uncommon cases.
*/
if (cache_bin_nonfast_aligned(ptr)) {
/* Junk unconditionally, even if bin is full. */
san_junk_ptr(ptr, sz_index2size(binind));
if (cache_bin_stash(bin, ptr)) {
return;
}
assert(cache_bin_full(bin));
/* Bin full; fall through into the flush branch. */
}
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
if (unlikely(tcache_small_bin_disabled(binind, bin))) {
arena_dalloc_small(tsd_tsdn(tsd), ptr);
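
One detail of the tcache_dalloc_small() change above worth spelling out: the
pointer is junk-filled before the stash is attempted, so even when the bin is
full and the pointer falls through to the regular flush path, a
read-after-free still observes the junk pattern until the memory is reused;
only the write-after-free verification is skipped for that particular pointer.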

File: src/arena.c

@@ -157,6 +157,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
cache_bin_t *cache_bin = &descriptor->bins[i];
astats->tcache_bytes +=
cache_bin_ncached_get_remote(cache_bin,
&tcache_bin_info[i]) * sz_index2size(i) +
cache_bin_nstashed_get(cache_bin,
&tcache_bin_info[i]) * sz_index2size(i);
}
}

File: src/cache_bin.c

@@ -2,6 +2,8 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"
void
cache_bin_info_init(cache_bin_info_t *info,

File: src/ctl.c

@@ -150,6 +150,7 @@ CTL_PROTO(opt_prof_recent_alloc_max)
CTL_PROTO(opt_prof_stats)
CTL_PROTO(opt_prof_sys_thread_name)
CTL_PROTO(opt_prof_time_res)
CTL_PROTO(opt_lg_san_uaf_align)
CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
@@ -472,6 +473,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_stats"), CTL(opt_prof_stats)},
{NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
{NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
{NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
{NAME("zero_realloc"), CTL(opt_zero_realloc)} {NAME("zero_realloc"), CTL(opt_zero_realloc)}
}; };
@ -2201,6 +2203,8 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
bool) bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_time_res, CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
prof_time_res_mode_names[opt_prof_time_res], const char *) prof_time_res_mode_names[opt_prof_time_res], const char *)
CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
opt_lg_san_uaf_align, ssize_t)
CTL_RO_NL_GEN(opt_zero_realloc,
zero_realloc_mode_names[opt_zero_realloc_action], const char *)

File: src/jemalloc.c

@@ -1657,6 +1657,31 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_CONTINUE;
}
if (config_uaf_detection &&
CONF_MATCH("lg_san_uaf_align")) {
ssize_t a;
CONF_VALUE_READ(ssize_t, a)
if (CONF_VALUE_READ_FAIL() || a < -1) {
CONF_ERROR("Invalid conf value",
k, klen, v, vlen);
}
if (a == -1) {
opt_lg_san_uaf_align = -1;
CONF_CONTINUE;
}
/* clip if necessary */
ssize_t max_allowed = (sizeof(size_t) << 3) - 1;
ssize_t min_allowed = LG_PAGE;
if (a > max_allowed) {
a = max_allowed;
} else if (a < min_allowed) {
a = min_allowed;
}
opt_lg_san_uaf_align = a;
CONF_CONTINUE;
}
CONF_HANDLE_SIZE_T(opt_san_guard_small,
"san_guard_small", 0, SIZE_T_MAX,
@@ -1760,6 +1785,7 @@ malloc_init_hard_a0_locked() {
prof_boot0();
}
malloc_conf_init(&sc_data, bin_shard_sizes);
san_init(opt_lg_san_uaf_align);
sz_boot(&sc_data, opt_cache_oblivious);
bin_info_boot(&sc_data, bin_shard_sizes);
@@ -2970,6 +2996,41 @@ free_default(void *ptr) {
}
}
JEMALLOC_ALWAYS_INLINE bool
free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
/*
* free_fastpath do not handle two uncommon cases: 1) sampled profiled
* objects and 2) sampled junk & stash for use-after-free detection.
* Both have special alignments which are used to escape the fastpath.
*
* prof_sample is page-aligned, which covers the UAF check when both
* are enabled (the assertion below). Avoiding redundant checks since
* this is on the fastpath -- at most one runtime branch from this.
*/
if (config_debug && cache_bin_nonfast_aligned(ptr)) {
assert(prof_sample_aligned(ptr));
}
if (config_prof && check_prof) {
/* When prof is enabled, the prof_sample alignment is enough. */
if (prof_sample_aligned(ptr)) {
return true;
} else {
return false;
}
}
if (config_uaf_detection) {
if (cache_bin_nonfast_aligned(ptr)) {
return true;
} else {
return false;
}
}
return false;
}
/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
@@ -2992,18 +3053,21 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
&arena_emap_global, ptr, &alloc_ctx);
/* Note: profiled objects will have alloc_ctx.slab set */
- if (unlikely(err || !alloc_ctx.slab)) {
+ if (unlikely(err || !alloc_ctx.slab ||
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ false))) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
} else {
/*
- * Check for both sizes that are too large, and for sampled
- * objects. Sampled objects are always page-aligned. The
- * sampled object check will also check for null ptr.
+ * Check for both sizes that are too large, and for sampled /
+ * special aligned objects. The alignment check will also check
+ * for null ptr.
*/
if (unlikely(size > SC_LOOKUP_MAXCLASS ||
- (config_prof && prof_sample_aligned(ptr)))) {
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ true))) {
return false;
}
alloc_ctx.szind = sz_size2index_lookup(size);

File: src/san.c

@@ -10,6 +10,15 @@
size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
/*
* Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1
* to always fail the nonfast_align check.
*/
uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
static inline void
san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
uintptr_t *addr, size_t size, bool left, bool right) {
@@ -141,8 +150,59 @@ san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
/* right */ true, /* remap */ false);
}
static bool
san_stashed_corrupted(void *ptr, size_t size) {
if (san_junk_ptr_should_slow()) {
for (size_t i = 0; i < size; i++) {
if (((char *)ptr)[i] != (char)uaf_detect_junk) {
return true;
}
}
return false;
}
void *first, *mid, *last;
san_junk_ptr_locations(ptr, size, &first, &mid, &last);
if (*(uintptr_t *)first != uaf_detect_junk ||
*(uintptr_t *)mid != uaf_detect_junk ||
*(uintptr_t *)last != uaf_detect_junk) {
return true;
}
return false;
}
void
san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
/*
* Verify that the junked-filled & stashed pointers remain unchanged, to
* detect write-after-free.
*/
for (size_t n = 0; n < nstashed; n++) {
void *stashed = ptrs[n];
assert(stashed != NULL);
assert(cache_bin_nonfast_aligned(stashed));
if (unlikely(san_stashed_corrupted(stashed, usize))) {
safety_check_fail("<jemalloc>: Write-after-free "
"detected on deallocated pointer %p (size %zu).\n",
stashed, usize);
}
}
}
void
tsd_san_init(tsd_t *tsd) {
*tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
*tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
}
void
san_init(ssize_t lg_san_uaf_align) {
assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE);
if (lg_san_uaf_align == -1) {
san_cache_bin_nonfast_mask = (uintptr_t)-1;
return;
}
san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1;
}
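
A minimal sketch of the mask set up by san_init() together with the check it
feeds in cache_bin_nonfast_aligned(), assuming lg_san_uaf_align is 12 (the
value test/unit/uaf.sh uses); standalone and illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void) {
	/* san_init(12): the mask covers the low 12 bits. */
	uintptr_t mask = ((uintptr_t)1 << 12) - 1;	/* 0xfff */
	/* san_init(-1): all-ones mask, so no pointer ever matches. */
	uintptr_t disabled = (uintptr_t)-1;

	uintptr_t aligned = 0x402000;	/* page-aligned -> sampled */
	uintptr_t plain = 0x402010;	/* not page-aligned -> fast path */

	/* Same predicate as cache_bin_nonfast_aligned(). */
	printf("%d %d %d\n",
	    (aligned & mask) == 0,		/* 1 */
	    (plain & mask) == 0,		/* 0 */
	    (aligned & disabled) == 0);		/* 0: detection disabled */
	return 0;
}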

File: src/tcache.c

@@ -4,6 +4,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h" #include "jemalloc/internal/sc.h"
/******************************************************************************/ /******************************************************************************/
@ -179,6 +180,8 @@ tcache_event(tsd_t *tsd) {
bool is_small = (szind < SC_NBINS); bool is_small = (szind < SC_NBINS);
cache_bin_t *cache_bin = &tcache->bins[szind]; cache_bin_t *cache_bin = &tcache->bins[szind];
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
&tcache_bin_info[szind]);
if (low_water > 0) {
@@ -497,6 +500,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, unsigned rem, bool small) {
tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&tcache_bin_info[binind]);
assert((cache_bin_sz_t)rem <= ncached);
@@ -525,6 +530,48 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false);
}
/*
* Flushing stashed happens when 1) tcache fill, 2) tcache flush, or 3) tcache
* GC event. This makes sure that the stashed items do not hold memory for too
* long, and new buffers can only be allocated when nothing is stashed.
*
* The downside is, the time between stash and flush may be relatively short,
* especially when the request rate is high. It lowers the chance of detecting
* write-after-free -- however that is a delayed detection anyway, and is less
* of a focus than the memory overhead.
*/
void
tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, bool is_small) {
cache_bin_info_t *info = &tcache_bin_info[binind];
/*
* The two below are for assertion only. The content of original cached
* items remain unchanged -- the stashed items reside on the other end
* of the stack. Checking the stack head and ncached to verify.
*/
void *head_content = *cache_bin->stack_head;
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
info);
cache_bin_sz_t nstashed = cache_bin_nstashed_get(cache_bin, info);
assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info));
if (nstashed == 0) {
return;
}
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
nstashed);
san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
is_small);
cache_bin_finish_flush_stashed(cache_bin, info);
assert(cache_bin_nstashed_get(cache_bin, info) == 0);
assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
assert(head_content == *cache_bin->stack_head);
}
void
tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena) {

File: test/include/test/arena_util.h

@@ -26,6 +26,12 @@ do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
static inline void
do_arena_destroy(unsigned arena_ind) {
/*
* For convenience, flush tcache in case there are cached items.
* However not assert success since the tcache may be disabled.
*/
mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,

File: test/unit/cache_bin.c

@@ -82,27 +82,30 @@ do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
free(out);
}
static void
test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) {
size_t size;
size_t alignment;
cache_bin_info_compute_alloc(info, 1, &size, &alignment);
void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
assert_ptr_not_null(mem, "Unexpected mallocx failure");
size_t cur_offset = 0;
cache_bin_preincrement(info, 1, mem, &cur_offset);
cache_bin_init(bin, info, mem, &cur_offset);
cache_bin_postincrement(info, 1, mem, &cur_offset);
assert_zu_eq(cur_offset, size, "Should use all requested memory");
}
TEST_BEGIN(test_cache_bin) {
const int ncached_max = 100;
bool success;
void *ptr;
- cache_bin_t bin;
cache_bin_info_t info;
cache_bin_info_init(&info, ncached_max);
+ cache_bin_t bin;
- size_t size;
- size_t alignment;
- cache_bin_info_compute_alloc(&info, 1, &size, &alignment);
- void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
- assert_ptr_not_null(mem, "Unexpected mallocx failure");
- size_t cur_offset = 0;
- cache_bin_preincrement(&info, 1, mem, &cur_offset);
- cache_bin_init(&bin, &info, mem, &cur_offset);
- cache_bin_postincrement(&info, 1, mem, &cur_offset);
- assert_zu_eq(cur_offset, size, "Should use all requested memory");
+ test_bin_init(&bin, &info);
/* Initialize to empty; should then have 0 elements. */
expect_d_eq(ncached_max, cache_bin_info_ncached_max(&info), "");
@@ -258,7 +261,123 @@ TEST_BEGIN(test_cache_bin) {
}
TEST_END
static void
do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Bin not empty");
expect_true(cache_bin_nstashed_get(bin, info) == 0, "Bin not empty");
expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
bool ret;
/* Fill */
for (cache_bin_sz_t i = 0; i < nfill; i++) {
ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
expect_true(ret, "Unexpected fill failure");
}
expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
"Wrong cached count");
/* Stash */
for (cache_bin_sz_t i = 0; i < nstash; i++) {
ret = cache_bin_stash(bin, &ptrs[i + nfill]);
expect_true(ret, "Unexpected stash failure");
}
expect_true(cache_bin_nstashed_get(bin, info) == nstash,
"Wrong stashed count");
if (nfill + nstash == info->ncached_max) {
ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
expect_false(ret, "Should not dalloc into a full bin");
ret = cache_bin_stash(bin, &ptrs[0]);
expect_false(ret, "Should not stash into a full bin");
}
/* Alloc filled ones */
for (cache_bin_sz_t i = 0; i < nfill; i++) {
void *ptr = cache_bin_alloc(bin, &ret);
expect_true(ret, "Unexpected alloc failure");
/* Verify it's not from the stashed range. */
expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
"Should not alloc stashed ptrs");
}
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get(bin, info) == nstash,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
expect_false(ret, "Should not alloc stashed");
/* Clear stashed ones */
cache_bin_finish_flush_stashed(bin, info);
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get(bin, info) == 0,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
expect_false(ret, "Should not alloc from empty bin");
}
TEST_BEGIN(test_cache_bin_stash) {
const int ncached_max = 100;
cache_bin_t bin;
cache_bin_info_t info;
cache_bin_info_init(&info, ncached_max);
test_bin_init(&bin, &info);
/*
* The content of this array is not accessed; instead the interior
* addresses are used to insert / stash into the bins as test pointers.
*/
void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
bool ret;
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin, &info) ==
(i / 2 + i % 2), "Wrong ncached value");
expect_true(cache_bin_nstashed_get(&bin, &info) == i / 2,
"Wrong nstashed value");
if (i % 2 == 0) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
} else {
ret = cache_bin_stash(&bin, &ptrs[i]);
expect_true(ret, "Should be able to stash into a "
"non-full cache bin");
}
}
ret = cache_bin_dalloc_easy(&bin, &ptrs[0]);
expect_false(ret, "Should not dalloc into a full cache bin");
ret = cache_bin_stash(&bin, &ptrs[0]);
expect_false(ret, "Should not stash into a full cache bin");
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
void *ptr = cache_bin_alloc(&bin, &ret);
if (i < ncached_max / 2) {
expect_true(ret, "Should be able to alloc");
uintptr_t diff = ((uintptr_t)ptr - (uintptr_t)&ptrs[0])
/ sizeof(void *);
expect_true(diff % 2 == 0, "Should be able to alloc");
} else {
expect_false(ret, "Should not alloc stashed");
expect_true(cache_bin_nstashed_get(&bin, &info) ==
ncached_max / 2, "Wrong nstashed value");
}
}
test_bin_init(&bin, &info);
do_flush_stashed_test(&bin, &info, ptrs, ncached_max, 0);
do_flush_stashed_test(&bin, &info, ptrs, 0, ncached_max);
do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 2);
do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 4);
}
TEST_END
int
main(void) {
- return test(test_cache_bin);
+ return test(test_cache_bin,
+ test_cache_bin_stash);
}

File: test/unit/mallctl.c

@@ -323,6 +323,7 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
TEST_MALLCTL_OPT(bool, prof_stats, prof);
TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
#undef TEST_MALLCTL_OPT
}
@@ -368,7 +369,7 @@ TEST_BEGIN(test_tcache_none) {
/* Make sure that tcache-based allocation returns p, not q. */
void *p1 = mallocx(42, 0);
expect_ptr_not_null(p1, "Unexpected mallocx() failure");
- if (!opt_prof) {
+ if (!opt_prof && !san_uaf_detection_enabled()) {
expect_ptr_eq(p0, p1,
"Expected tcache to allocate cached region");
}
@@ -434,8 +435,10 @@ TEST_BEGIN(test_tcache) {
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
i);
- expect_ptr_eq(ps[i], p0,
- "Expected mallocx() to allocate cached region, i=%u", i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
+ "allocate cached region, i=%u", i);
+ }
}
/* Verify that reallocation uses cached regions. */
@@ -444,8 +447,10 @@ TEST_BEGIN(test_tcache) {
qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
i);
- expect_ptr_eq(qs[i], q0,
- "Expected rallocx() to allocate cached region, i=%u", i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
+ "allocate cached region, i=%u", i);
+ }
/* Avoid undefined behavior in case of test failure. */
if (qs[i] == NULL) {
qs[i] = ps[i];

File: test/unit/tcache_max.c

@@ -152,6 +152,7 @@ TEST_BEGIN(test_tcache_max) {
test_skip_if(!config_stats);
test_skip_if(!opt_tcache);
test_skip_if(opt_prof);
test_skip_if(san_uaf_detection_enabled());
for (alloc_option = alloc_option_start;
alloc_option < alloc_option_end;

File: test/unit/tcache_max.sh

@@ -1,3 +1,3 @@
#!/bin/sh
- export MALLOC_CONF="tcache_max:1024"
+ export MALLOC_CONF="tcache_max:1024,lg_san_uaf_align:-1"

File: test/unit/uaf.c (new file, 225 lines)

@@ -0,0 +1,225 @@
#include "test/jemalloc_test.h"
#include "test/arena_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"
static size_t san_uaf_align;
static bool fake_abort_called;
void fake_abort(const char *message) {
(void)message;
fake_abort_called = true;
}
static void
test_write_after_free_pre(void) {
safety_check_set_abort(&fake_abort);
fake_abort_called = false;
}
static void
test_write_after_free_post(void) {
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
0, "Unexpected tcache flush failure");
expect_true(fake_abort_called, "Use-after-free check didn't fire.");
safety_check_set_abort(NULL);
}
static bool
uaf_detection_enabled(void) {
if (!config_uaf_detection) {
return false;
}
ssize_t lg_san_uaf_align;
size_t sz = sizeof(lg_san_uaf_align);
assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
if (lg_san_uaf_align < 0) {
return false;
}
assert_zd_ge(lg_san_uaf_align, LG_PAGE, "san_uaf_align out of range");
san_uaf_align = (size_t)1 << lg_san_uaf_align;
bool tcache_enabled;
sz = sizeof(tcache_enabled);
assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL,
0), 0, "Unexpected mallctl failure");
if (!tcache_enabled) {
return false;
}
return true;
}
static void
test_use_after_free(size_t alloc_size, bool write_after_free) {
void *ptr = (void *)(uintptr_t)san_uaf_align;
assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
ptr = (void *)((uintptr_t)123 * (uintptr_t)san_uaf_align);
assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
ptr = (void *)((uintptr_t)san_uaf_align + 1);
assert_false(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
/*
* Disable purging (-1) so that all dirty pages remain committed, to
* make use-after-free tolerable.
*/
unsigned arena_ind = do_arena_create(-1, -1);
int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
size_t n_max = san_uaf_align * 2;
void **items = mallocx(n_max * sizeof(void *), flags);
assert_ptr_not_null(items, "Unexpected mallocx failure");
bool found = false;
size_t iter = 0;
char magic = 's';
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
0, "Unexpected tcache flush failure");
while (!found) {
ptr = mallocx(alloc_size, flags);
assert_ptr_not_null(ptr, "Unexpected mallocx failure");
found = cache_bin_nonfast_aligned(ptr);
*(char *)ptr = magic;
items[iter] = ptr;
assert_zu_lt(iter++, n_max, "No aligned ptr found");
}
if (write_after_free) {
test_write_after_free_pre();
}
bool junked = false;
while (iter-- != 0) {
char *volatile mem = items[iter];
assert_c_eq(*mem, magic, "Unexpected memory content");
free(mem);
if (*mem != magic) {
junked = true;
assert_c_eq(*mem, (char)uaf_detect_junk,
"Unexpected junk-filling bytes");
if (write_after_free) {
*(char *)mem = magic + 1;
}
}
/* Flush tcache (including stashed). */
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
0, "Unexpected tcache flush failure");
}
expect_true(junked, "Aligned ptr not junked");
if (write_after_free) {
test_write_after_free_post();
}
dallocx(items, flags);
do_arena_destroy(arena_ind);
}
TEST_BEGIN(test_read_after_free) {
test_skip_if(!uaf_detection_enabled());
test_use_after_free(sizeof(void *), /* write_after_free */ false);
test_use_after_free(sizeof(void *) + 1, /* write_after_free */ false);
test_use_after_free(16, /* write_after_free */ false);
test_use_after_free(20, /* write_after_free */ false);
test_use_after_free(32, /* write_after_free */ false);
test_use_after_free(33, /* write_after_free */ false);
test_use_after_free(48, /* write_after_free */ false);
test_use_after_free(64, /* write_after_free */ false);
test_use_after_free(65, /* write_after_free */ false);
test_use_after_free(129, /* write_after_free */ false);
test_use_after_free(255, /* write_after_free */ false);
test_use_after_free(256, /* write_after_free */ false);
}
TEST_END
TEST_BEGIN(test_write_after_free) {
test_skip_if(!uaf_detection_enabled());
test_use_after_free(sizeof(void *), /* write_after_free */ true);
test_use_after_free(sizeof(void *) + 1, /* write_after_free */ true);
test_use_after_free(16, /* write_after_free */ true);
test_use_after_free(20, /* write_after_free */ true);
test_use_after_free(32, /* write_after_free */ true);
test_use_after_free(33, /* write_after_free */ true);
test_use_after_free(48, /* write_after_free */ true);
test_use_after_free(64, /* write_after_free */ true);
test_use_after_free(65, /* write_after_free */ true);
test_use_after_free(129, /* write_after_free */ true);
test_use_after_free(255, /* write_after_free */ true);
test_use_after_free(256, /* write_after_free */ true);
}
TEST_END
static bool
check_allocated_intact(void **allocated, size_t n_alloc) {
for (unsigned i = 0; i < n_alloc; i++) {
void *ptr = *(void **)allocated[i];
bool found = false;
for (unsigned j = 0; j < n_alloc; j++) {
if (ptr == allocated[j]) {
found = true;
break;
}
}
if (!found) {
return false;
}
}
return true;
}
TEST_BEGIN(test_use_after_free_integration) {
test_skip_if(!uaf_detection_enabled());
unsigned arena_ind = do_arena_create(-1, -1);
int flags = MALLOCX_ARENA(arena_ind);
size_t n_alloc = san_uaf_align * 2;
void **allocated = mallocx(n_alloc * sizeof(void *), flags);
assert_ptr_not_null(allocated, "Unexpected mallocx failure");
for (unsigned i = 0; i < n_alloc; i++) {
allocated[i] = mallocx(sizeof(void *) * 8, flags);
assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
if (i > 0) {
/* Emulate a circular list. */
*(void **)allocated[i] = allocated[i - 1];
}
}
*(void **)allocated[0] = allocated[n_alloc - 1];
expect_true(check_allocated_intact(allocated, n_alloc),
"Allocated data corrupted");
for (unsigned i = 0; i < n_alloc; i++) {
free(allocated[i]);
}
/* Read-after-free */
expect_false(check_allocated_intact(allocated, n_alloc),
"Junk-filling not detected");
test_write_after_free_pre();
for (unsigned i = 0; i < n_alloc; i++) {
allocated[i] = mallocx(sizeof(void *), flags);
assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
*(void **)allocated[i] = (void *)(uintptr_t)i;
}
/* Write-after-free */
for (unsigned i = 0; i < n_alloc; i++) {
free(allocated[i]);
*(void **)allocated[i] = NULL;
}
test_write_after_free_post();
}
TEST_END
int
main(void) {
return test(
test_read_after_free,
test_write_after_free,
test_use_after_free_integration);
}

File: test/unit/uaf.sh (new file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
export MALLOC_CONF="lg_san_uaf_align:12"