From eaed1e39be8574b1a59d21824b68e31af378cd0f Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Mon, 3 Aug 2020 18:23:36 -0700
Subject: [PATCH] Add sized-delete size-checking functionality.

The existing checks are good at finding sized-delete bugs (on tcache
flush), but not so good at pinpointing them. Debug mode can find them,
but sometimes debug mode slows down a program so much that hard-to-hit
bugs can take a long time to crash.

This commit adds functionality to keep programs mostly on their fast
paths, while also checking every sized delete argument they get.
---
 Makefile.in                                   |  1 +
 configure.ac                                  | 17 +++++
 .../internal/jemalloc_internal_defs.h.in      |  3 +
 .../jemalloc/internal/jemalloc_preamble.h.in  | 13 ++++
 src/jemalloc.c                                | 48 +++++++++++---
 test/unit/size_check.c                        | 62 +++++++++++++++++++
 6 files changed, 135 insertions(+), 9 deletions(-)
 create mode 100644 test/unit/size_check.c

diff --git a/Makefile.in b/Makefile.in
index 7d147583..a63f69f1 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -247,6 +247,7 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/sc.c \
 	$(srcroot)test/unit/seq.c \
 	$(srcroot)test/unit/SFMT.c \
+	$(srcroot)test/unit/size_check.c \
 	$(srcroot)test/unit/size_classes.c \
 	$(srcroot)test/unit/slab.c \
 	$(srcroot)test/unit/smoothstep.c \
diff --git a/configure.ac b/configure.ac
index b197d32e..d68d376c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1492,6 +1492,23 @@ if test "x$enable_opt_safety_checks" = "x1" ; then
 fi
 AC_SUBST([enable_opt_safety_checks])
 
+dnl Look for sized-deallocation bugs while otherwise being in opt mode.
+AC_ARG_ENABLE([opt-size-checks],
+  [AS_HELP_STRING([--enable-opt-size-checks],
+      [Perform sized-deallocation argument checks, even in opt mode])],
+[if test "x$enable_opt_size_checks" = "xno" ; then
+  enable_opt_size_checks="0"
+else
+  enable_opt_size_checks="1"
+fi
+],
+[enable_opt_size_checks="0"]
+)
+if test "x$enable_opt_size_checks" = "x1" ; then
+  AC_DEFINE([JEMALLOC_OPT_SIZE_CHECKS], [ ])
+fi
+AC_SUBST([enable_opt_size_checks])
+
 JE_COMPILABLE([a program using __builtin_unreachable], [
 void foo (void) {
   __builtin_unreachable();
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 0aef0bb3..ee052bb8 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -373,4 +373,7 @@
 /* Performs additional safety checks when defined. */
 #undef JEMALLOC_OPT_SAFETY_CHECKS
 
+/* Performs additional size checks when defined. */
+#undef JEMALLOC_OPT_SIZE_CHECKS
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/include/jemalloc/internal/jemalloc_preamble.h.in b/include/jemalloc/internal/jemalloc_preamble.h.in
index 740fcfcb..4012eb25 100644
--- a/include/jemalloc/internal/jemalloc_preamble.h.in
+++ b/include/jemalloc/internal/jemalloc_preamble.h.in
@@ -180,6 +180,19 @@ static const bool config_opt_safety_checks =
 #endif
     ;
 
+/*
+ * Extra debugging of sized deallocations too onerous to be included in the
+ * general safety checks.
+ */
+static const bool config_opt_size_checks =
+#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_OPT_SAFETY_CHECKS) \
+    || defined(JEMALLOC_DEBUG)
+    true
+#else
+    false
+#endif
+    ;
+
 #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
 /* Currently percpu_arena depends on sched_getcpu. */
 #define JEMALLOC_PERCPU_ARENA
diff --git a/src/jemalloc.c b/src/jemalloc.c
index ae9ef3d1..51a1a23a 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2793,6 +2793,27 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 	thread_dalloc_event(tsd, usize);
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
+	if (config_opt_size_checks) {
+		emap_alloc_ctx_t dbg_ctx;
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+		    &dbg_ctx);
+		if (alloc_ctx->szind != dbg_ctx.szind) {
+			safety_check_fail_sized_dealloc(
+			    /* current_dealloc */ true);
+			return true;
+		}
+		if (alloc_ctx->slab != dbg_ctx.slab) {
+			safety_check_fail(
+			    "Internal heap corruption detected: "
+			    "mismatch in slab bit");
+			return true;
+		}
+	}
+	return false;
+}
+
 JEMALLOC_ALWAYS_INLINE void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 	if (!slow_path) {
@@ -2823,13 +2844,6 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 				/* Non page aligned must be slab allocated. */
 				alloc_ctx.slab = true;
 			}
-			if (config_debug) {
-				emap_alloc_ctx_t dbg_ctx;
-				emap_alloc_ctx_lookup(tsd_tsdn(tsd),
-				    &arena_emap_global, ptr, &dbg_ctx);
-				assert(dbg_ctx.szind == alloc_ctx.szind);
-				assert(dbg_ctx.slab == alloc_ctx.slab);
-			}
 		} else if (opt_prof) {
 			emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
 			    &alloc_ctx);
@@ -2845,6 +2859,16 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 		}
 	}
+	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+	if (fail) {
+		/*
+		 * This is a heap corruption bug. In real life we'll crash; for
+		 * the unit test we just want to avoid breaking anything too
+		 * badly to get a test result out. Let's leak instead of trying
+		 * to free.
+		 */
+		return;
+	}
 
 	if (config_prof && opt_prof) {
 		prof_free(tsd, ptr, usize, &alloc_ctx);
@@ -2934,8 +2958,15 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
 			return false;
 		}
 		alloc_ctx.szind = sz_size2index_lookup(size);
-		alloc_ctx.slab = false;
+		/* This is a dead store, except when opt size checking is on. */
+		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
 	}
+	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+	if (fail) {
+		/* See the comment in isfree. */
*/ + return true; + } + uint64_t deallocated, threshold; te_free_fastpath_ctx(tsd, &deallocated, &threshold, size_hint); @@ -3739,7 +3770,6 @@ sdallocx_default(void *ptr, size_t size, int flags) { tsd_t *tsd = tsd_fetch_min(); bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); - assert(usize == isalloc(tsd_tsdn(tsd), ptr)); check_entry_exit_locking(tsd_tsdn(tsd)); unsigned tcache_ind = mallocx_tcache_get(flags); diff --git a/test/unit/size_check.c b/test/unit/size_check.c new file mode 100644 index 00000000..3d2912df --- /dev/null +++ b/test/unit/size_check.c @@ -0,0 +1,62 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/safety_check.h" + +bool fake_abort_called; +void fake_abort(const char *message) { + (void)message; + fake_abort_called = true; +} + +#define SIZE1 SC_SMALL_MAXCLASS +#define SIZE2 (SC_SMALL_MAXCLASS / 2) + +TEST_BEGIN(test_invalid_size_sdallocx) { + test_skip_if(!config_opt_size_checks); + safety_check_set_abort(&fake_abort); + + fake_abort_called = false; + void *ptr = malloc(SIZE1); + assert_ptr_not_null(ptr, "Unexpected failure"); + sdallocx(ptr, SIZE2, 0); + expect_true(fake_abort_called, "Safety check didn't fire"); + + safety_check_set_abort(NULL); +} +TEST_END + +TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) { + test_skip_if(!config_opt_size_checks); + safety_check_set_abort(&fake_abort); + + fake_abort_called = false; + void *ptr = malloc(SIZE1); + assert_ptr_not_null(ptr, "Unexpected failure"); + sdallocx(ptr, SIZE2, MALLOCX_TCACHE_NONE); + expect_true(fake_abort_called, "Safety check didn't fire"); + + safety_check_set_abort(NULL); +} +TEST_END + +TEST_BEGIN(test_invalid_size_sdallocx_noflags) { + test_skip_if(!config_opt_size_checks); + safety_check_set_abort(&fake_abort); + + fake_abort_called = false; + void *ptr = malloc(SIZE1); + assert_ptr_not_null(ptr, "Unexpected failure"); + je_sdallocx_noflags(ptr, SIZE2); + expect_true(fake_abort_called, "Safety check didn't fire"); + + safety_check_set_abort(NULL); +} +TEST_END + +int +main(void) { + return test( + test_invalid_size_sdallocx, + test_invalid_size_sdallocx_nonzero_flag, + test_invalid_size_sdallocx_noflags); +}
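
Usage note (not part of the diff): the new check is compiled in when the tree
is configured with the --enable-opt-size-checks flag added above, and the
config_opt_size_checks definition also turns it on for debug and
opt-safety-checks builds. For reviewers who want to see it trigger outside the
unit test, below is a minimal sketch of a standalone program; the program, the
4096/8 sizes, and the build setup are illustrative assumptions, not part of
the patch or the jemalloc test suite.

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
            void *p = malloc(4096);
            /*
             * The size passed to sdallocx maps to a different size class than
             * the one the allocation actually came from, so the lookup in
             * maybe_check_alloc_ctx() should detect a szind mismatch and
             * report a sized-deallocation failure (aborting by default).
             */
            sdallocx(p, 8, 0);
            return 0;
    }

Without the check, a mismatch like this is typically only caught later by the
existing checks (e.g. at tcache flush), which is the "hard to pinpoint"
behavior the commit message describes.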