From 26b1c1398264dec25bf998f6bec21799ad4513da Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Mon, 26 Feb 2018 17:11:29 -0800
Subject: [PATCH] Background threads: fix an indexing bug.

We have a buffer overrun that manifests in the case where arena indices
higher than the number of CPUs are accessed before arena indices lower
than the number of CPUs.  This fixes the bug and adds a test.
---
 Makefile.in                          |  1 +
 src/background_thread.c              |  3 ++-
 test/unit/background_thread_enable.c | 34 ++++++++++++++++++++++++++++
 3 files changed, 37 insertions(+), 1 deletion(-)
 create mode 100644 test/unit/background_thread_enable.c

diff --git a/Makefile.in b/Makefile.in
index 96c4ae03..aefd6d87 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -162,6 +162,7 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/arena_reset.c \
 	$(srcroot)test/unit/atomic.c \
 	$(srcroot)test/unit/background_thread.c \
+	$(srcroot)test/unit/background_thread_enable.c \
 	$(srcroot)test/unit/base.c \
 	$(srcroot)test/unit/bitmap.c \
 	$(srcroot)test/unit/ckh.c \
diff --git a/src/background_thread.c b/src/background_thread.c
index 6baff22b..a8a5a052 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -600,7 +600,8 @@ background_threads_enable(tsd_t *tsd) {
 		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
 			continue;
 		}
-		background_thread_info_t *info = &background_thread_info[i];
+		background_thread_info_t *info = &background_thread_info[
+		    i % ncpus];
 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
 		assert(info->state == background_thread_stopped);
 		background_thread_init(tsd, info);
diff --git a/test/unit/background_thread_enable.c b/test/unit/background_thread_enable.c
new file mode 100644
index 00000000..9bb58652
--- /dev/null
+++ b/test/unit/background_thread_enable.c
@@ -0,0 +1,34 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "background_thread:false,narenas:1";
+
+TEST_BEGIN(test_deferred) {
+	test_skip_if(!have_background_thread);
+
+	unsigned id;
+	size_t sz_u = sizeof(unsigned);
+
+	/*
+	 * 10 here is somewhat arbitrary, except insofar as we want to ensure
+	 * that the number of background threads is smaller than the number of
+	 * arenas.  I'll ragequit long before we have to spin up 10 threads per
+	 * cpu to handle background purging, so this is a conservative
+	 * approximation.
+	 */
+	for (unsigned i = 0; i < 10 * ncpus; i++) {
+		assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+		    "Failed to create arena");
+	}
+
+	bool enable = true;
+	size_t sz_b = sizeof(bool);
+	assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+	    "Failed to enable background threads");
+}
+TEST_END
+
+int
+main(void) {
+	return test_no_reentrancy(
+	    test_deferred);
+}
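
Note (illustration, not part of the upstream patch): background_thread_info
holds one slot per CPU, while arena indices can run well past ncpus, so an
arena index has to be folded back into [0, ncpus) before it is used as an
index -- which is what the `i % ncpus` added to background_threads_enable()
does before taking info->mtx.  The standalone sketch below shows that indexing
invariant using hypothetical names and fixed sizes (N_CPUS, N_ARENAS,
thread_info_t); it is not jemalloc code.

#include <assert.h>
#include <stdio.h>

/*
 * Stand-in for background_thread_info: one slot per CPU, even though
 * arenas may outnumber CPUs (the test above creates 10 * ncpus arenas).
 */
#define N_CPUS   4
#define N_ARENAS (10 * N_CPUS)

typedef struct {
	int started;
} thread_info_t;

static thread_info_t thread_info[N_CPUS];

int
main(void) {
	for (unsigned i = 0; i < N_ARENAS; i++) {
		/*
		 * The buggy form, &thread_info[i], walks off the end of the
		 * array as soon as i >= N_CPUS.  Folding the arena index back
		 * into the per-CPU range keeps the access in bounds.
		 */
		thread_info_t *info = &thread_info[i % N_CPUS];
		assert(i % N_CPUS < N_CPUS);
		info->started = 1;
	}
	printf("%d arenas mapped onto %d per-CPU slots\n", N_ARENAS, N_CPUS);
	return 0;
}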