From b83b5ad44a51a18d9b9813906d22c7e008d2b517 Mon Sep 17 00:00:00 2001
From: Qi Wang
Date: Fri, 9 Jun 2017 00:00:48 -0700
Subject: [PATCH] Do not re-enable background threads after fork.

Avoid calling pthread_create in postfork handlers.
---
 doc/jemalloc.xml.in     |  5 ++-
 src/background_thread.c | 79 +++++++++++++++++++++++------------------
 src/jemalloc.c          |  1 -
 3 files changed, 49 insertions(+), 36 deletions(-)

diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 41e80049..21e401ac 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -750,7 +750,10 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
     background threads will be no more than the number of CPUs or active
     arenas).  Threads run periodically, and handle purging asynchronously.
     When switching
-    off, background threads are terminated synchronously.  See
+    off, background threads are terminated synchronously.  After a
+    fork(2) call, the state in the child process will be disabled
+    regardless of the state in the parent process.  See
     stats.background_thread for related stats.
     opt.background_thread

diff --git a/src/background_thread.c b/src/background_thread.c
index f2bc474a..09e08b0b 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -350,24 +350,38 @@ check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
 		}
 		background_thread_info_t *info = &background_thread_info[i];
 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
-		if (info->started) {
-			pre_reentrancy(tsd);
-			int err = pthread_create_wrapper(&info->thread, NULL,
-			    background_thread_entry, (void *)(uintptr_t)i);
-			post_reentrancy(tsd);
-			if (err == 0) {
-				(*n_created)++;
-				created_threads[i] = true;
-			} else {
-				malloc_printf("<jemalloc>: background thread "
-				    "creation failed (%d)\n", err);
-				if (opt_abort) {
-					abort();
-				}
+		bool create = info->started;
+		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+		if (!create) {
+			continue;
+		}
+
+		/*
+		 * To avoid deadlock with prefork handlers (which wait for the
+		 * mutex held here), unlock before calling pthread_create().
+		 */
+		malloc_mutex_unlock(tsd_tsdn(tsd),
+		    &background_thread_info[0].mtx);
+		pre_reentrancy(tsd);
+		int err = pthread_create_wrapper(&info->thread, NULL,
+		    background_thread_entry, (void *)(uintptr_t)i);
+		post_reentrancy(tsd);
+		malloc_mutex_lock(tsd_tsdn(tsd),
+		    &background_thread_info[0].mtx);
+
+		if (err == 0) {
+			(*n_created)++;
+			created_threads[i] = true;
+		} else {
+			malloc_printf("<jemalloc>: background thread "
+			    "creation failed (%d)\n", err);
+			if (opt_abort) {
+				abort();
 			}
 		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+		/* Since we unlocked and may miss signals, restart. */
+		i = 1;
 	}
 }
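/*
 * Illustration only, not part of the patch: a minimal pthreads sketch of the
 * locking pattern check_background_thread_creation() now uses in the hunk
 * above.  Read the "should this worker run?" flag under the lock, drop the
 * lock around pthread_create(), re-acquire it, and rescan because the state
 * may have changed while unlocked.  All names here (worker_lock,
 * worker_wanted, spawn_pending_workers, ...) are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define NWORKERS 4

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static bool worker_wanted[NWORKERS];	/* set elsewhere, under worker_lock */
static bool worker_started[NWORKERS];
static pthread_t worker_thread[NWORKERS];

static void *
worker_entry(void *arg) {
	(void)arg;
	return NULL;
}

void
spawn_pending_workers(void) {
	pthread_mutex_lock(&worker_lock);
	bool progress = true;
	while (progress) {
		progress = false;
		for (unsigned i = 0; i < NWORKERS; i++) {
			if (!worker_wanted[i] || worker_started[i]) {
				continue;
			}
			/*
			 * Mirror the patch: do not hold worker_lock across
			 * pthread_create(), so that a prefork handler
			 * (registered via pthread_atfork()) waiting on
			 * worker_lock cannot deadlock against us while
			 * thread creation takes its own locks.
			 */
			pthread_mutex_unlock(&worker_lock);
			int err = pthread_create(&worker_thread[i], NULL,
			    worker_entry, (void *)(uintptr_t)i);
			pthread_mutex_lock(&worker_lock);
			if (err == 0) {
				worker_started[i] = true;
			} else {
				/* Give up on this slot instead of retrying. */
				worker_wanted[i] = false;
			}
			/* Flags may have changed while unlocked; rescan. */
			progress = true;
			break;
		}
	}
	pthread_mutex_unlock(&worker_lock);
}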
@@ -643,14 +657,7 @@ label_done:
 void
 background_thread_prefork0(tsdn_t *tsdn) {
 	malloc_mutex_prefork(tsdn, &background_thread_lock);
-	if (background_thread_enabled()) {
-		background_thread_enabled_at_fork = true;
-		background_thread_enabled_set(tsdn, false);
-		background_threads_disable(tsdn_tsd(tsdn));
-	} else {
-		background_thread_enabled_at_fork = false;
-	}
-	assert(n_background_threads == 0);
+	background_thread_enabled_at_fork = background_thread_enabled();
 }
 
 void
@@ -660,22 +667,12 @@ background_thread_prefork1(tsdn_t *tsdn) {
 	}
 }
 
-static void
-background_thread_postfork_init(tsdn_t *tsdn) {
-	assert(n_background_threads == 0);
-	if (background_thread_enabled_at_fork) {
-		background_thread_enabled_set(tsdn, true);
-		background_threads_enable(tsdn_tsd(tsdn));
-	}
-}
-
 void
 background_thread_postfork_parent(tsdn_t *tsdn) {
 	for (unsigned i = 0; i < ncpus; i++) {
 		malloc_mutex_postfork_parent(tsdn,
 		    &background_thread_info[i].mtx);
 	}
-	background_thread_postfork_init(tsdn);
 	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
 }
 
@@ -686,9 +683,23 @@ background_thread_postfork_child(tsdn_t *tsdn) {
 		    &background_thread_info[i].mtx);
 	}
 	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
+	if (!background_thread_enabled_at_fork) {
+		return;
+	}
+	/* Clear background_thread state (reset to disabled for child). */
 	malloc_mutex_lock(tsdn, &background_thread_lock);
-	background_thread_postfork_init(tsdn);
+	n_background_threads = 0;
+	background_thread_enabled_set(tsdn, false);
+	for (unsigned i = 0; i < ncpus; i++) {
+		background_thread_info_t *info = &background_thread_info[i];
+		malloc_mutex_lock(tsdn, &info->mtx);
+		info->started = false;
+		int ret = pthread_cond_init(&info->cond, NULL);
+		assert(ret == 0);
+		background_thread_info_init(tsdn, info);
+		malloc_mutex_unlock(tsdn, &info->mtx);
+	}
 	malloc_mutex_unlock(tsdn, &background_thread_lock);
 }
 
diff --git a/src/jemalloc.c b/src/jemalloc.c
index e2865d25..52c86aa6 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -340,7 +340,6 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
 	if (ind == 0) {
 		return;
 	}
-	/* background_thread_create() handles reentrancy internally. */
 	if (have_background_thread) {
 		bool err;
 		malloc_mutex_lock(tsdn, &background_thread_lock);
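For reference, a minimal standalone sketch of the fork-handling scheme the patch moves to, written against plain pthreads rather than jemalloc internals; the names worker_lock, workers_enabled, worker_started, worker_cond, and workers_init are hypothetical. The prefork handler takes the lock guarding worker state, the parent handler just releases it, and the child handler resets the bookkeeping to disabled and then releases the lock, because only the forking thread exists in the child and calling pthread_create() from a postfork handler is exactly what the patch removes.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

#define NWORKERS 4

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static bool workers_enabled;			/* user-visible on/off switch */
static bool worker_started[NWORKERS];		/* which workers are running */
static pthread_cond_t worker_cond[NWORKERS];

static void
workers_prefork(void) {
	/* Quiesce worker state changes for the duration of fork(). */
	pthread_mutex_lock(&worker_lock);
}

static void
workers_postfork_parent(void) {
	/* The parent's worker threads are intact; just drop the lock. */
	pthread_mutex_unlock(&worker_lock);
}

static void
workers_postfork_child(void) {
	/*
	 * Only the forking thread exists in the child; the workers are gone.
	 * Do not recreate them here.  Reset the state to "disabled" and let
	 * the application re-enable workers later from a safe context.
	 */
	workers_enabled = false;
	for (unsigned i = 0; i < NWORKERS; i++) {
		worker_started[i] = false;
		/* Fresh condition variables, as the patch does with
		 * pthread_cond_init() in background_thread_postfork_child(). */
		int ret = pthread_cond_init(&worker_cond[i], NULL);
		assert(ret == 0);
		(void)ret;
	}
	/* The lock was taken by the prefork handler; release it last. */
	pthread_mutex_unlock(&worker_lock);
}

void
workers_init(void) {
	/* Register once, e.g. from the library's initialization path. */
	pthread_atfork(workers_prefork, workers_postfork_parent,
	    workers_postfork_child);
}

Recreating workers is thus deferred until they are explicitly enabled again in the child, which keeps pthread_create() out of the atfork handlers, matching the intent stated in the commit message.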