Add sized-delete size-checking functionality.
The existing checks are good at finding such issues (on tcache flush), but not so good at pinpointing them. Debug mode can pinpoint them, but debug mode sometimes slows a program down so much that hard-to-hit bugs can take a long time to crash. This commit adds functionality that keeps programs mostly on their fast paths while still checking the size argument of every sized deallocation they perform.
parent 53084cc5c2
commit eaed1e39be
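For context, here is a minimal sketch (not part of this commit; the sizes and build setup are illustrative) of the class of bug these checks target: a sized deallocation whose size argument does not match the size the memory was allocated with. Assuming the program is linked against jemalloc, the mismatched sdallocx call below is what the new checks are meant to flag at the call site, rather than later (e.g. on tcache flush).

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	void *p = malloc(100);	/* falls into one small size class */
	/*
	 * The caller passes a stale/incorrect size.  With size checks
	 * enabled, jemalloc can detect that 40 maps to a different size
	 * class than the one p was allocated from and fail loudly here.
	 */
	sdallocx(p, 40, 0);
	return 0;
}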
@@ -247,6 +247,7 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/sc.c \
 	$(srcroot)test/unit/seq.c \
 	$(srcroot)test/unit/SFMT.c \
+	$(srcroot)test/unit/size_check.c \
 	$(srcroot)test/unit/size_classes.c \
 	$(srcroot)test/unit/slab.c \
 	$(srcroot)test/unit/smoothstep.c \
configure.ac
@@ -1492,6 +1492,23 @@ if test "x$enable_opt_safety_checks" = "x1" ; then
 fi
 AC_SUBST([enable_opt_safety_checks])
 
+dnl Look for sized-deallocation bugs while otherwise being in opt mode.
+AC_ARG_ENABLE([opt-size-checks],
+  [AS_HELP_STRING([--enable-opt-size-checks],
+                  [Perform sized-deallocation argument checks, even in opt mode])],
+[if test "x$enable_opt_size_checks" = "xno" ; then
+  enable_opt_size_checks="0"
+else
+  enable_opt_size_checks="1"
+fi
+],
+[enable_opt_size_checks="0"]
+)
+if test "x$enable_opt_size_checks" = "x1" ; then
+  AC_DEFINE([JEMALLOC_OPT_SIZE_CHECKS], [ ])
+fi
+AC_SUBST([enable_opt_size_checks])
+
 JE_COMPILABLE([a program using __builtin_unreachable], [
 void foo (void) {
     __builtin_unreachable();
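Presumably, with this change the checks can be turned on in an otherwise optimized build by passing --enable-opt-size-checks to configure, per the AC_ARG_ENABLE block above; judging from the config_opt_size_checks definition below, they are also implied by --enable-debug and --enable-opt-safety-checks builds.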
@@ -373,4 +373,7 @@
 /* Performs additional safety checks when defined. */
 #undef JEMALLOC_OPT_SAFETY_CHECKS
 
+/* Performs additional size checks when defined. */
+#undef JEMALLOC_OPT_SIZE_CHECKS
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@@ -180,6 +180,19 @@ static const bool config_opt_safety_checks =
 #endif
     ;
 
+/*
+ * Extra debugging of sized deallocations too onerous to be included in the
+ * general safety checks.
+ */
+static const bool config_opt_size_checks =
+#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_OPT_SAFETY_CHECKS) \
+    || defined(JEMALLOC_DEBUG)
+    true
+#else
+    false
+#endif
+    ;
+
 #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
 /* Currently percpu_arena depends on sched_getcpu. */
 #define JEMALLOC_PERCPU_ARENA
@@ -2793,6 +2793,27 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 	thread_dalloc_event(tsd, usize);
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
+	if (config_opt_size_checks) {
+		emap_alloc_ctx_t dbg_ctx;
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+		    &dbg_ctx);
+		if (alloc_ctx->szind != dbg_ctx.szind) {
+			safety_check_fail_sized_dealloc(
+			    /* current_dealloc */ true);
+			return true;
+		}
+		if (alloc_ctx->slab != dbg_ctx.slab) {
+			safety_check_fail(
+			    "Internal heap corruption detected: "
+			    "mismatch in slab bit");
+			return true;
+		}
+	}
+	return false;
+}
+
 JEMALLOC_ALWAYS_INLINE void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 	if (!slow_path) {
@@ -2823,13 +2844,6 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 			/* Non page aligned must be slab allocated. */
 			alloc_ctx.slab = true;
 		}
-		if (config_debug) {
-			emap_alloc_ctx_t dbg_ctx;
-			emap_alloc_ctx_lookup(tsd_tsdn(tsd),
-			    &arena_emap_global, ptr, &dbg_ctx);
-			assert(dbg_ctx.szind == alloc_ctx.szind);
-			assert(dbg_ctx.slab == alloc_ctx.slab);
-		}
 	} else if (opt_prof) {
 		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
 		    ptr, &alloc_ctx);
@@ -2845,6 +2859,16 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 		}
 	}
+	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+	if (fail) {
+		/*
+		 * This is a heap corruption bug. In real life we'll crash; for
+		 * the unit test we just want to avoid breaking anything too
+		 * badly to get a test result out. Let's leak instead of trying
+		 * to free.
+		 */
+		return;
+	}
 
 	if (config_prof && opt_prof) {
 		prof_free(tsd, ptr, usize, &alloc_ctx);
@@ -2934,8 +2958,15 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
 			return false;
 		}
 		alloc_ctx.szind = sz_size2index_lookup(size);
-		alloc_ctx.slab = false;
+		/* This is a dead store, except when opt size checking is on. */
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
+	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+	if (fail) {
+		/* See the comment in isfree. */
+		return true;
+	}
+
 	uint64_t deallocated, threshold;
 	te_free_fastpath_ctx(tsd, &deallocated, &threshold, size_hint);
 
@@ -3739,7 +3770,6 @@ sdallocx_default(void *ptr, size_t size, int flags) {
 	tsd_t *tsd = tsd_fetch_min();
 	bool fast = tsd_fast(tsd);
 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
-	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 	check_entry_exit_locking(tsd_tsdn(tsd));
 
 	unsigned tcache_ind = mallocx_tcache_get(flags);
test/unit/size_check.c (new file, 62 lines)
@@ -0,0 +1,62 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+	(void)message;
+	fake_abort_called = true;
+}
+
+#define SIZE1 SC_SMALL_MAXCLASS
+#define SIZE2 (SC_SMALL_MAXCLASS / 2)
+
+TEST_BEGIN(test_invalid_size_sdallocx) {
+	test_skip_if(!config_opt_size_checks);
+	safety_check_set_abort(&fake_abort);
+
+	fake_abort_called = false;
+	void *ptr = malloc(SIZE1);
+	assert_ptr_not_null(ptr, "Unexpected failure");
+	sdallocx(ptr, SIZE2, 0);
+	expect_true(fake_abort_called, "Safety check didn't fire");
+
+	safety_check_set_abort(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) {
+	test_skip_if(!config_opt_size_checks);
+	safety_check_set_abort(&fake_abort);
+
+	fake_abort_called = false;
+	void *ptr = malloc(SIZE1);
+	assert_ptr_not_null(ptr, "Unexpected failure");
+	sdallocx(ptr, SIZE2, MALLOCX_TCACHE_NONE);
+	expect_true(fake_abort_called, "Safety check didn't fire");
+
+	safety_check_set_abort(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_noflags) {
+	test_skip_if(!config_opt_size_checks);
+	safety_check_set_abort(&fake_abort);
+
+	fake_abort_called = false;
+	void *ptr = malloc(SIZE1);
+	assert_ptr_not_null(ptr, "Unexpected failure");
+	je_sdallocx_noflags(ptr, SIZE2);
+	expect_true(fake_abort_called, "Safety check didn't fire");
+
+	safety_check_set_abort(NULL);
+}
+TEST_END
+
+int
+main(void) {
+	return test(
+	    test_invalid_size_sdallocx,
+	    test_invalid_size_sdallocx_nonzero_flag,
+	    test_invalid_size_sdallocx_noflags);
+}