From d955d6f2be7f17ba1f9a81f457e72565474cf18d Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Tue, 13 Jun 2017 16:16:33 -0700
Subject: [PATCH 01/19] Fix extent_hooks in extent_grow_retained().

This issue caused the default extent alloc function to be incorrectly
used even when arena.<i>.extent_hooks is set.  This bug was introduced
by 411697adcda2fd75e135cdcdafb95f2bd295dc7f (Use exponential series to
size extents.), which was first released in 5.0.0.
---
 src/extent.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/src/extent.c b/src/extent.c
index 386a7ce6..f31ed32e 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -1066,9 +1066,18 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 	}
 	bool zeroed = false;
 	bool committed = false;
-	void *ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
-	    &zeroed, &committed, (dss_prec_t)atomic_load_u(&arena->dss_prec,
-	    ATOMIC_RELAXED));
+
+	void *ptr;
+	if (*r_extent_hooks == &extent_hooks_default) {
+		ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
+		    &zeroed, &committed, (dss_prec_t)atomic_load_u(
+		    &arena->dss_prec, ATOMIC_RELAXED));
+	} else {
+		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
+		    alloc_size, PAGE, &zeroed, &committed,
+		    arena_ind_get(arena));
+	}
+
 	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
 	    committed);

From bdcf40a6208962008010c30463dc7dbddf3fc564 Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Tue, 13 Jun 2017 16:35:35 -0700
Subject: [PATCH 02/19] Add alloc hook test in test/integration/extent.

---
 test/integration/extent.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/integration/extent.c b/test/integration/extent.c
index 7262b803..1dcf2176 100644
--- a/test/integration/extent.c
+++ b/test/integration/extent.c
@@ -39,10 +39,13 @@ test_extent_body(unsigned arena_ind) {
 	assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
 	    0, "Unexpected mallctlnametomib() failure");
 	purge_mib[1] = (size_t)arena_ind;
+	called_alloc = false;
+	try_alloc = true;
 	try_dalloc = false;
 	try_decommit = false;
 	p = mallocx(large0 * 2, flags);
 	assert_ptr_not_null(p, "Unexpected mallocx() error");
+	assert_true(called_alloc, "Expected alloc call");
 	called_dalloc = false;
 	called_decommit = false;
 	did_purge_lazy = false;

From a4d6fe73cf07b3be3af6b7811cfc5950320bb37f Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Wed, 14 Jun 2017 12:12:23 -0700
Subject: [PATCH 03/19] Only abort on dlsym when necessary.

If neither background_thread nor lazy_lock is in use, do not abort on
dlsym errors.
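[Illustration (not part of this patch): a minimal standalone sketch of the interposition pattern involved, with hypothetical names; glibc requires _GNU_SOURCE for RTLD_NEXT. The wrapper resolves the real pthread_create() through dlsym(), and after this change a failed lookup is fatal only when lazy_lock or background_thread actually needs the wrapper:]

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef int (*pthread_create_fptr_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);
static pthread_create_fptr_t pthread_create_fptr;
static bool can_enable_background_thread;

static void
resolve_pthread_create(bool need_wrapper) {
	pthread_create_fptr = (pthread_create_fptr_t)dlsym(RTLD_NEXT,
	    "pthread_create");
	can_enable_background_thread = (pthread_create_fptr != NULL);
	if (pthread_create_fptr == NULL && need_wrapper) {
		/* Abort only when a feature depends on the wrapper. */
		abort();
	}
}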
---
 .../jemalloc/internal/background_thread_externs.h |  1 +
 src/background_thread.c                           | 14 +++++++++++---
 src/ctl.c                                         |  7 +++++++
 3 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/include/jemalloc/internal/background_thread_externs.h b/include/jemalloc/internal/background_thread_externs.h
index 7c883697..8b4b8471 100644
--- a/include/jemalloc/internal/background_thread_externs.h
+++ b/include/jemalloc/internal/background_thread_externs.h
@@ -6,6 +6,7 @@ extern malloc_mutex_t background_thread_lock;
 extern atomic_b_t background_thread_enabled_state;
 extern size_t n_background_threads;
 extern background_thread_info_t *background_thread_info;
+extern bool can_enable_background_thread;
 
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
 bool background_threads_enable(tsd_t *tsd);
diff --git a/src/background_thread.c b/src/background_thread.c
index a7403b85..1ff59447 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -20,6 +20,9 @@ size_t n_background_threads;
 /* Thread info per-index. */
 background_thread_info_t *background_thread_info;
 
+/* False if no necessary runtime support. */
+bool can_enable_background_thread;
+
 /******************************************************************************/
 
 #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
@@ -785,9 +788,14 @@ background_thread_boot0(void) {
 #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
 	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
 	if (pthread_create_fptr == NULL) {
-		malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
-		    "\"pthread_create\")\n");
-		abort();
+		can_enable_background_thread = false;
+		if (config_lazy_lock || opt_background_thread) {
+			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+			    "\"pthread_create\")\n");
+			abort();
+		}
+	} else {
+		can_enable_background_thread = true;
 	}
 #endif
 	return false;
diff --git a/src/ctl.c b/src/ctl.c
index b3ae4aab..f1310cdf 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1522,6 +1522,13 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 		background_thread_enabled_set(tsd_tsdn(tsd), newval);
 
 		if (newval) {
+			if (!can_enable_background_thread) {
+				malloc_printf("<jemalloc>: Error in dlsym("
+				    "RTLD_NEXT, \"pthread_create\"). Cannot "
+				    "enable background_thread\n");
+				ret = EFAULT;
+				goto label_return;
+			}
 			if (background_threads_enable(tsd)) {
 				ret = EFAULT;
 				goto label_return;

From 84f6c2cae0fb1399377ef6aea9368444c4987cc6 Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Wed, 14 Jun 2017 18:44:13 -0700
Subject: [PATCH 04/19] Log decay->nunpurged before purging.

During purging, we may unlock decay->mtx.  Therefore we should finish
logging decay-related counters before attempting to purge.
---
 src/arena.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/arena.c b/src/arena.c
index 0912df31..019dd877 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -725,12 +725,13 @@ arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	arena_decay_epoch_advance_helper(decay, time, current_npages);
 
 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
+	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
+	    current_npages;
 	if (purge) {
 		arena_decay_try_purge(tsdn, arena, decay, extents,
 		    current_npages, npages_limit);
 	}
-	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
-	    current_npages;
 }
 
 static void

From ae93fb08e21284f025871e9f5daccf3d0329b99b Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Thu, 15 Jun 2017 15:16:18 -0700
Subject: [PATCH 05/19] Pass tsd to tcache_flush().

---
 include/jemalloc/internal/tcache_externs.h | 2 +-
 src/ctl.c                                  | 2 +-
 src/tcache.c                               | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/jemalloc/internal/tcache_externs.h b/include/jemalloc/internal/tcache_externs.h
index abe133fa..db3e9c7d 100644
--- a/include/jemalloc/internal/tcache_externs.h
+++ b/include/jemalloc/internal/tcache_externs.h
@@ -48,7 +48,7 @@
 void	tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
 void	tcache_prefork(tsdn_t *tsdn);
 void	tcache_postfork_parent(tsdn_t *tsdn);
 void	tcache_postfork_child(tsdn_t *tsdn);
-void	tcache_flush(void);
+void	tcache_flush(tsd_t *tsd);
 bool	tsd_tcache_data_init(tsd_t *tsd);
 bool	tsd_tcache_enabled_data_init(tsd_t *tsd);
diff --git a/src/ctl.c b/src/ctl.c
index f1310cdf..dfbeddda 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1696,7 +1696,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 	READONLY();
 	WRITEONLY();
 
-	tcache_flush();
+	tcache_flush(tsd);
 
 	ret = 0;
 label_return:
diff --git a/src/tcache.c b/src/tcache.c
index 6355805b..936ef314 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -474,8 +474,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
 }
 
 void
-tcache_flush(void) {
-	tsd_t *tsd = tsd_fetch();
+tcache_flush(tsd_t *tsd) {
 	assert(tcache_available(tsd));
 	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
 }

From 9b1befabbb7a7105501d27843873d14e1c2de54b Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Thu, 15 Jun 2017 16:53:22 -0700
Subject: [PATCH 06/19] Add minimal initialized TSD.

We use the minimal_initialized tsd (which requires no cleanup) for
free() specifically, if tsd hasn't been initialized yet.  Any other
activity will transition the state from minimal to normal.  This is to
work around the case where a thread has no malloc calls in its lifetime
until thread termination; in that case, free() may happen after the TLS
destructors run.
---
 include/jemalloc/internal/tsd.h | 30 ++++++++++++++++------
 src/jemalloc.c                  | 10 +++++++-
 src/tsd.c                       | 44 ++++++++++++++++++++++-----------
 3 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 631fbf1f..155a2ec6 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -99,9 +99,10 @@ enum {
 	tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
 	/* the above 2 nominal states should be lower values. */
 	tsd_state_nominal_max = 1, /* used for comparison only. */
-	tsd_state_purgatory = 2,
-	tsd_state_reincarnated = 3,
-	tsd_state_uninitialized = 4
+	tsd_state_minimal_initialized = 2,
+	tsd_state_purgatory = 3,
+	tsd_state_reincarnated = 4,
+	tsd_state_uninitialized = 5
 };
 
 /* Manually limit tsd_state_t to a single byte.
*/ @@ -190,7 +191,8 @@ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) { \ assert(tsd->state == tsd_state_nominal || \ tsd->state == tsd_state_nominal_slow || \ - tsd->state == tsd_state_reincarnated); \ + tsd->state == tsd_state_reincarnated || \ + tsd->state == tsd_state_minimal_initialized); \ return tsd_##n##p_get_unsafe(tsd); \ } MALLOC_TSD @@ -225,7 +227,8 @@ MALLOC_TSD #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t val) { \ - assert(tsd->state != tsd_state_reincarnated); \ + assert(tsd->state != tsd_state_reincarnated && \ + tsd->state != tsd_state_minimal_initialized); \ *tsd_##n##p_get(tsd) = val; \ } MALLOC_TSD @@ -248,7 +251,7 @@ tsd_fast(tsd_t *tsd) { } JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch_impl(bool init, bool internal) { +tsd_fetch_impl(bool init, bool minimal) { tsd_t *tsd = tsd_get(init); if (!init && tsd_get_allocates() && tsd == NULL) { @@ -257,7 +260,7 @@ tsd_fetch_impl(bool init, bool internal) { assert(tsd != NULL); if (unlikely(tsd->state != tsd_state_nominal)) { - return tsd_fetch_slow(tsd, internal); + return tsd_fetch_slow(tsd, minimal); } assert(tsd_fast(tsd)); tsd_assert_fast(tsd); @@ -265,9 +268,20 @@ tsd_fetch_impl(bool init, bool internal) { return tsd; } +/* Get a minimal TSD that requires no cleanup. See comments in free(). */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_min(void) { + return tsd_fetch_impl(true, true); +} + +/* For internal background threads use only. */ JEMALLOC_ALWAYS_INLINE tsd_t * tsd_internal_fetch(void) { - return tsd_fetch_impl(true, true); + tsd_t *tsd = tsd_fetch_min(); + /* Use reincarnated state to prevent full initialization. */ + tsd->state = tsd_state_reincarnated; + + return tsd; } JEMALLOC_ALWAYS_INLINE tsd_t * diff --git a/src/jemalloc.c b/src/jemalloc.c index 52c86aa6..c773cc44 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -2264,7 +2264,15 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { - tsd_t *tsd = tsd_fetch(); + /* + * We avoid setting up tsd fully (e.g. tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); check_entry_exit_locking(tsd_tsdn(tsd)); tcache_t *tcache; diff --git a/src/tsd.c b/src/tsd.c index 97330332..f968992f 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -87,7 +87,8 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) { static bool tsd_data_init_nocleanup(tsd_t *tsd) { - assert(tsd->state == tsd_state_reincarnated); + assert(tsd->state == tsd_state_reincarnated || + tsd->state == tsd_state_minimal_initialized); /* * During reincarnation, there is no guarantee that the cleanup function * will be called (deallocation may happen after all tsd destructors). @@ -103,15 +104,8 @@ tsd_data_init_nocleanup(tsd_t *tsd) { } tsd_t * -tsd_fetch_slow(tsd_t *tsd, bool internal) { - if (internal) { - /* For internal background threads use only. */ - assert(tsd->state == tsd_state_uninitialized); - tsd->state = tsd_state_reincarnated; - tsd_set(tsd); - tsd_data_init_nocleanup(tsd); - return tsd; - } +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); if (tsd->state == tsd_state_nominal_slow) { /* On slow path but no work needed. 
*/ @@ -119,11 +113,28 @@ tsd_fetch_slow(tsd_t *tsd, bool internal) { tsd_reentrancy_level_get(tsd) > 0 || *tsd_arenas_tdata_bypassp_get(tsd)); } else if (tsd->state == tsd_state_uninitialized) { - tsd->state = tsd_state_nominal; - tsd_slow_update(tsd); - /* Trigger cleanup handler registration. */ - tsd_set(tsd); - tsd_data_init(tsd); + if (!minimal) { + tsd->state = tsd_state_nominal; + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } else { + tsd->state = tsd_state_minimal_initialized; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd->state == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. */ + tsd->state = tsd_state_nominal; + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); @@ -197,6 +208,9 @@ tsd_cleanup(void *arg) { case tsd_state_uninitialized: /* Do nothing. */ break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ case tsd_state_reincarnated: /* * Reincarnated means another destructor deallocated memory From d35c037e03e1450794dcf595e49a1e1f97f87ac4 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 19 Jun 2017 21:19:15 -0700 Subject: [PATCH 07/19] Clear tcache_ql after fork in child. --- src/arena.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/arena.c b/src/arena.c index 019dd877..d401808b 100644 --- a/src/arena.c +++ b/src/arena.c @@ -2133,6 +2133,23 @@ void arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + ql_new(&arena->tcache_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + } + } + for (i = 0; i < NBINS; i++) { malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); } From 37f3fa0941022d047fdcd86959d5f94f872e45e2 Mon Sep 17 00:00:00 2001 From: Jason Evans Date: Mon, 19 Jun 2017 20:35:33 -0700 Subject: [PATCH 08/19] Mask signals during background thread creation. This prevents signals from being inadvertently delivered to background threads. 
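[Illustration (not part of this patch): the masking pattern in isolation, as a minimal sketch. Note that actually preventing delivery requires a full signal mask; patch 15 below corrects the sigemptyset() used here to sigfillset():]

#include <pthread.h>
#include <signal.h>

static int
create_signals_masked(pthread_t *thd, void *(*fn)(void *), void *arg) {
	sigset_t set, oldset;
	sigfillset(&set);	/* Block all signals in the creating thread. */
	int err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (err != 0) {
		return err;
	}
	/* The new thread starts with the inherited (fully blocked) mask. */
	err = pthread_create(thd, NULL, fn, arg);
	/* Restore the caller's mask; restore-failure handling elided. */
	pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	return err;
}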
---
 .../internal/jemalloc_internal_decls.h |  1 +
 src/background_thread.c                | 38 +++++++++++++++++--
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
index 1efdb56b..8ae5ef48 100644
--- a/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -22,6 +22,7 @@
 #  include <sys/uio.h>
 # endif
 # include <pthread.h>
+# include <signal.h>
 # ifdef JEMALLOC_OS_UNFAIR_LOCK
 #  include <os/lock.h>
 # endif
diff --git a/src/background_thread.c b/src/background_thread.c
index 1ff59447..f0aa04f3 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -347,6 +347,38 @@ background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
 
 static void *background_thread_entry(void *ind_arg);
 
+static int
+background_thread_create_signals_masked(pthread_t *thread,
+    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
+	/*
+	 * Mask signals during thread creation so that the thread inherits
+	 * an empty signal set.
+	 */
+	sigset_t set;
+	sigemptyset(&set);
+	sigset_t oldset;
+	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
+	if (mask_err != 0) {
+		return mask_err;
+	}
+	int create_err = pthread_create_wrapper(thread, attr, start_routine,
+	    arg);
+	/*
+	 * Restore the signal mask.  Failure to restore the signal mask here
+	 * changes program behavior.
+	 */
+	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+	if (restore_err != 0) {
+		malloc_printf("<jemalloc>: background thread creation "
+		    "failed (%d), and signal mask restoration failed "
+		    "(%d)\n", create_err, restore_err);
+		if (opt_abort) {
+			abort();
+		}
+	}
+	return create_err;
+}
+
 static void
 check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
     bool *created_threads) {
@@ -377,8 +409,8 @@ label_restart:
 	 */
 	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
 
 	pre_reentrancy(tsd);
-	int err = pthread_create_wrapper(&info->thread, NULL,
-	    background_thread_entry, (void *)(uintptr_t)i);
+	int err = background_thread_create_signals_masked(&info->thread,
+	    NULL, background_thread_entry, (void *)(uintptr_t)i);
 	post_reentrancy(tsd);
 
 	if (err == 0) {
@@ -528,7 +560,7 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
 	 * To avoid complications (besides reentrancy), create internal
 	 * background threads with the underlying pthread_create.
 	 */
-	int err = pthread_create_wrapper(&info->thread, NULL,
+	int err = background_thread_create_signals_masked(&info->thread, NULL,
 	    background_thread_entry, (void *)thread_ind);
 	post_reentrancy(tsd);

From 52fc887b49c768a9cee61d9bda9c885efb10fe95 Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Thu, 22 Jun 2017 13:57:50 -0700
Subject: [PATCH 09/19] Avoid inactivity_check within background threads.

Pass is_background_thread down the decay path, so that the background
thread itself won't attempt inactivity_check.  This fixes an issue with
a background thread doing trylock on a mutex it already owns.
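[Illustration (not part of this patch): the hazard reduced to plain pthreads. With the default non-recursive mutex type, trylock on a mutex the calling thread already owns fails with EBUSY; jemalloc's malloc_mutex layer additionally carries ownership assertions, so a self-trylock from the decay path would trip them in debug builds:]

#include <assert.h>
#include <errno.h>
#include <pthread.h>

int
main(void) {
	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_lock(&mtx);
	/* Self-trylock on a non-recursive mutex: reports EBUSY. */
	int r = pthread_mutex_trylock(&mtx);
	assert(r == EBUSY);
	(void)r;
	pthread_mutex_unlock(&mtx);
	return 0;
}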
--- .../internal/background_thread_inlines.h | 5 ++- src/arena.c | 39 +++++++++++-------- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/include/jemalloc/internal/background_thread_inlines.h b/include/jemalloc/internal/background_thread_inlines.h index fd5095f2..ef50231e 100644 --- a/include/jemalloc/internal/background_thread_inlines.h +++ b/include/jemalloc/internal/background_thread_inlines.h @@ -41,8 +41,9 @@ background_thread_indefinite_sleep(background_thread_info_t *info) { } JEMALLOC_ALWAYS_INLINE void -arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena) { - if (!background_thread_enabled()) { +arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread) { + if (!background_thread_enabled() || is_background_thread) { return; } background_thread_info_t *info = diff --git a/src/arena.c b/src/arena.c index d401808b..f9b0f685 100644 --- a/src/arena.c +++ b/src/arena.c @@ -61,7 +61,8 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { */ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, - arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit); + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread); static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, @@ -378,7 +379,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, if (arena_dirty_decay_ms_get(arena) == 0) { arena_decay_dirty(tsdn, arena, false, true); } else { - arena_background_thread_inactivity_check(tsdn, arena); + arena_background_thread_inactivity_check(tsdn, arena, false); } } @@ -687,10 +688,11 @@ arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, static void arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, size_t current_npages, size_t npages_limit) { + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { if (current_npages > npages_limit) { arena_decay_to_limit(tsdn, arena, decay, extents, false, - npages_limit); + npages_limit, is_background_thread); } } @@ -720,7 +722,7 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, static void arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, const nstime_t *time, bool purge) { + extents_t *extents, const nstime_t *time, bool is_background_thread) { size_t current_npages = extents_npages_get(extents); arena_decay_epoch_advance_helper(decay, time, current_npages); @@ -728,9 +730,10 @@ arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, /* We may unlock decay->mtx when try_purge(). Finish logging first. */ decay->nunpurged = (npages_limit > current_npages) ? 
npages_limit : current_npages; - if (purge) { + + if (!background_thread_enabled() || is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, - current_npages, npages_limit); + current_npages, npages_limit, is_background_thread); } } @@ -795,7 +798,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, if (decay_ms <= 0) { if (decay_ms == 0) { arena_decay_to_limit(tsdn, arena, decay, extents, false, - 0); + 0, is_background_thread); } return false; } @@ -830,14 +833,13 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, */ bool advance_epoch = arena_decay_deadline_reached(decay, &time); if (advance_epoch) { - bool should_purge = is_background_thread || - !background_thread_enabled(); arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, - should_purge); + is_background_thread); } else if (is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, extents_npages_get(extents), - arena_decay_backlog_npages_limit(decay)); + arena_decay_backlog_npages_limit(decay), + is_background_thread); } return advance_epoch; @@ -916,7 +918,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, static size_t arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, - bool all, extent_list_t *decay_extents) { + bool all, extent_list_t *decay_extents, bool is_background_thread) { UNUSED size_t nmadvise, nunmapped; size_t npurged; @@ -946,7 +948,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_muzzy, extent); arena_background_thread_inactivity_check(tsdn, - arena); + arena, is_background_thread); break; } /* Fall through. */ @@ -985,7 +987,8 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, */ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, bool all, size_t npages_limit) { + extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); malloc_mutex_assert_owner(tsdn, &decay->mtx); @@ -1005,7 +1008,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, npages_limit, &decay_extents); if (npurge != 0) { UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, - &extent_hooks, decay, extents, all, &decay_extents); + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); assert(npurged == npurge); } @@ -1018,7 +1022,8 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread, bool all) { if (all) { malloc_mutex_lock(tsdn, &decay->mtx); - arena_decay_to_limit(tsdn, arena, decay, extents, all, 0); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + is_background_thread); malloc_mutex_unlock(tsdn, &decay->mtx); return false; From a3f4977217af417b547e34cec3f5bd16874b8aa9 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Fri, 23 Jun 2017 09:58:35 -0700 Subject: [PATCH 10/19] Add thread name for background threads. 
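[Illustration (not part of this patch): the configure check added below probes the Linux/glibc form of the call, sketched here. On Linux the name is limited to 16 bytes including the NUL terminator, which "jemalloc_bg_thd" exactly fits; other platforms (e.g. macOS) use a different signature:]

#define _GNU_SOURCE	/* glibc exposes pthread_setname_np under this. */
#include <pthread.h>

static void
name_current_thread(void) {
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP	/* configure-defined macro */
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
}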
---
 configure.ac                                          | 9 +++++++++
 include/jemalloc/internal/jemalloc_internal_defs.h.in | 3 +++
 src/background_thread.c                               | 4 +++-
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index 32ae02c2..770fff56 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1470,6 +1470,15 @@ if test "x$abi" != "xpecoff" ; then
   if test "x${je_cv_pthread_atfork}" = "xyes" ; then
     AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
   fi
+  dnl Check if pthread_setname_np is available with the expected API.
+  JE_COMPILABLE([pthread_setname_np(3)], [
+#include <pthread.h>
+], [
+  pthread_setname_np(pthread_self(), "setname_test");
+], [je_cv_pthread_setname_np])
+  if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ])
+  fi
 fi
 
 JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 2bf9dea1..c0f834f2 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -98,6 +98,9 @@
 /* Defined if pthread_atfork(3) is available. */
 #undef JEMALLOC_HAVE_PTHREAD_ATFORK
 
+/* Defined if pthread_setname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
diff --git a/src/background_thread.c b/src/background_thread.c
index f0aa04f3..ab076fee 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -499,7 +499,9 @@ static void *
 background_thread_entry(void *ind_arg) {
 	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
 	assert(thread_ind < ncpus);
-
+#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
+#endif
 	if (opt_percpu_arena != percpu_arena_disabled) {
 		set_current_thread_affinity((int)thread_ind);
 	}

From d49ac4c7096b79539ce84fa1bfe122bc9e3f1b43 Mon Sep 17 00:00:00 2001
From: Jason Evans <jasone@canonware.com>
Date: Fri, 23 Jun 2017 10:40:02 -0700
Subject: [PATCH 11/19] Fix assertion typos.

Reported by Conrad Meyer.
---
 src/ctl.c      | 2 +-
 src/jemalloc.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/ctl.c b/src/ctl.c
index dfbeddda..a647c6c0 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1970,7 +1970,7 @@ arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
 		unsigned ind = arena_ind % ncpus;
 		background_thread_info_t *info =
 		    &background_thread_info[ind];
-		assert(info->state = background_thread_paused);
+		assert(info->state == background_thread_paused);
 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
 		info->state = background_thread_started;
 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index c773cc44..aa79633c 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1799,7 +1799,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	 */
 	assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
 	    dopts->tcache_ind == TCACHE_IND_NONE);
-	assert(dopts->arena_ind = ARENA_IND_AUTOMATIC);
+	assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
 	dopts->tcache_ind = TCACHE_IND_NONE;
 	/* We know that arena 0 has already been initialized. */
 	dopts->arena_ind = 0;

From d6eb8ac8f30745b06744ad5cb2988a392c4448cd Mon Sep 17 00:00:00 2001
From: Qi Wang <interwq@gwu.edu>
Date: Thu, 22 Jun 2017 15:36:41 -0700
Subject: [PATCH 12/19] Set reentrancy when invoking customized extent hooks.

Customized extent hooks may malloc / free and thus trigger reentrancy.
Support this behavior by adding reentrancy guards around hook
invocations.
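[Illustration (not part of this patch): a hypothetical custom hook written against the jemalloc 5 extent_hooks_t alloc signature, showing how a hook can re-enter the allocator; alignment and placement handling are elided:]

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

static void *
logging_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/*
	 * The malloc()/free() below re-enter jemalloc while the calling
	 * arena is mid-operation; the reentrancy level set around hook
	 * invocation is what keeps such nested calls safe.
	 */
	char *msg = malloc(64);
	if (msg != NULL) {
		snprintf(msg, 64, "extent alloc: %zu bytes, arena %u\n",
		    size, arena_ind);
		fputs(msg, stderr);
		free(msg);
	}
	(void)alignment;	/* Only page alignment from mmap; stricter
				 * alignment handling elided for brevity. */
	if (new_addr != NULL) {
		return NULL;	/* Placement requests unsupported here. */
	}
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED) {
		return NULL;
	}
	*zero = true;
	*commit = true;
	return ret;
}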
--- include/jemalloc/internal/base_externs.h | 2 +- src/arena.c | 4 +- src/base.c | 39 ++++++----- src/extent.c | 84 +++++++++++++++++++++--- test/unit/base.c | 15 ++--- 5 files changed, 109 insertions(+), 35 deletions(-) diff --git a/include/jemalloc/internal/base_externs.h b/include/jemalloc/internal/base_externs.h index 0a1114f4..a4fd5ac7 100644 --- a/include/jemalloc/internal/base_externs.h +++ b/include/jemalloc/internal/base_externs.h @@ -3,7 +3,7 @@ base_t *b0get(void); base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); -void base_delete(base_t *base); +void base_delete(tsdn_t *tsdn, base_t *base); extent_hooks_t *base_extent_hooks_get(base_t *base); extent_hooks_t *base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks); diff --git a/src/arena.c b/src/arena.c index f9b0f685..4e3bd6c1 100644 --- a/src/arena.c +++ b/src/arena.c @@ -1257,7 +1257,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { * Destroy the base allocator, which manages all metadata ever mapped by * this arena. */ - base_delete(arena->base); + base_delete(tsd_tsdn(tsd), arena->base); } static extent_t * @@ -2061,7 +2061,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { return arena; label_error: if (ind != 0) { - base_delete(base); + base_delete(tsdn, base); } return NULL; } diff --git a/src/base.c b/src/base.c index 8e1544fd..22c94338 100644 --- a/src/base.c +++ b/src/base.c @@ -15,7 +15,7 @@ static base_t *b0; /******************************************************************************/ static void * -base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) { +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; @@ -25,15 +25,18 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) { if (extent_hooks == &extent_hooks_default) { addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit); } else { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE, &zero, &commit, ind); + post_reentrancy(tsdn_tsd(tsdn)); } return addr; } static void -base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) { /* * Cascade through dalloc, decommit, purge_forced, and purge_lazy, @@ -61,27 +64,32 @@ base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, /* Nothing worked. This should never happen. */ not_reached(); } else { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); if (extent_hooks->dalloc != NULL && !extent_hooks->dalloc(extent_hooks, addr, size, true, ind)) { - return; + goto label_done; } if (extent_hooks->decommit != NULL && !extent_hooks->decommit(extent_hooks, addr, size, 0, size, ind)) { - return; + goto label_done; } if (extent_hooks->purge_forced != NULL && !extent_hooks->purge_forced(extent_hooks, addr, size, 0, size, ind)) { - return; + goto label_done; } if (extent_hooks->purge_lazy != NULL && !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, ind)) { - return; + goto label_done; } /* Nothing worked. That's the application's problem. */ + label_done: + post_reentrancy(tsdn_tsd(tsdn)); + return; } } @@ -157,7 +165,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, * On success a pointer to the initialized base_block_t header is returned. 
*/ static base_block_t * -base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, +base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, size_t alignment) { alignment = ALIGNMENT_CEILING(alignment, QUANTUM); @@ -179,7 +187,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); size_t block_size = (min_block_size > next_block_size) ? min_block_size : next_block_size; - base_block_t *block = (base_block_t *)base_map(extent_hooks, ind, + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, block_size); if (block == NULL) { return NULL; @@ -207,8 +215,9 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { * called. */ malloc_mutex_unlock(tsdn, &base->mtx); - base_block_t *block = base_block_alloc(extent_hooks, base_ind_get(base), - &base->pind_last, &base->extent_sn_next, size, alignment); + base_block_t *block = base_block_alloc(tsdn, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); malloc_mutex_lock(tsdn, &base->mtx); if (block == NULL) { return NULL; @@ -234,8 +243,8 @@ base_t * base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { pszind_t pind_last = 0; size_t extent_sn_next = 0; - base_block_t *block = base_block_alloc(extent_hooks, ind, &pind_last, - &extent_sn_next, sizeof(base_t), QUANTUM); + base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); if (block == NULL) { return NULL; } @@ -249,7 +258,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, malloc_mutex_rank_exclusive)) { - base_unmap(extent_hooks, ind, block, block->size); + base_unmap(tsdn, extent_hooks, ind, block, block->size); return NULL; } base->pind_last = pind_last; @@ -272,13 +281,13 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { } void -base_delete(base_t *base) { +base_delete(tsdn_t *tsdn, base_t *base) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *next = base->blocks; do { base_block_t *block = next; next = block->next; - base_unmap(extent_hooks, base_ind_get(base), block, + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, block->size); } while (next != NULL); } diff --git a/src/extent.c b/src/extent.c index f31ed32e..4b66dd88 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1073,9 +1073,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, &zeroed, &committed, (dss_prec_t)atomic_load_u( &arena->dss_prec, ATOMIC_RELAXED)); } else { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, alloc_size, PAGE, &zeroed, &committed, arena_ind_get(arena)); + post_reentrancy(tsdn_tsd(tsdn)); } extent_init(extent, arena, ptr, alloc_size, false, NSIZES, @@ -1247,8 +1250,11 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, alignment, zero, commit); } else { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, esize, alignment, zero, commit, arena_ind_get(arena)); + post_reentrancy(tsdn_tsd(tsdn)); } if (addr == NULL) { extent_dalloc(tsdn, arena, extent); @@ -1486,10 +1492,13 @@ 
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, err = extent_dalloc_default_impl(extent_base_get(extent), extent_size_get(extent)); } else { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); err = ((*r_extent_hooks)->dalloc == NULL || (*r_extent_hooks)->dalloc(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena))); + post_reentrancy(tsdn_tsd(tsdn)); } if (!err) { @@ -1515,6 +1524,10 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, } extent_reregister(tsdn, extent); + if (*r_extent_hooks != &extent_hooks_default) { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); + } /* Try to decommit; purge if that fails. */ bool zeroed; if (!extent_committed_get(extent)) { @@ -1536,6 +1549,9 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, } else { zeroed = false; } + if (*r_extent_hooks != &extent_hooks_default) { + post_reentrancy(tsdn_tsd(tsdn)); + } extent_zeroed_set(extent, zeroed); if (config_prof) { @@ -1579,9 +1595,12 @@ extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_destroy_default_impl(extent_base_get(extent), extent_size_get(extent)); } else if ((*r_extent_hooks)->destroy != NULL) { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); (*r_extent_hooks)->destroy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena)); + post_reentrancy(tsdn_tsd(tsdn)); } extent_dalloc(tsdn, arena, extent); @@ -1602,9 +1621,16 @@ extent_commit_impl(tsdn_t *tsdn, arena_t *arena, WITNESS_RANK_CORE, growing_retained ? 1 : 0); extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); + } bool err = ((*r_extent_hooks)->commit == NULL || (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + post_reentrancy(tsdn_tsd(tsdn)); + } extent_committed_set(extent, extent_committed_get(extent) || !err); return err; } @@ -1633,10 +1659,17 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn)); + } bool err = ((*r_extent_hooks)->decommit == NULL || (*r_extent_hooks)->decommit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + post_reentrancy(tsdn_tsd(tsdn)); + } extent_committed_set(extent, extent_committed_get(extent) && err); return err; } @@ -1663,10 +1696,23 @@ extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, WITNESS_RANK_CORE, growing_retained ? 
1 : 0);
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	return ((*r_extent_hooks)->purge_lazy == NULL ||
-	    (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+
+	if ((*r_extent_hooks)->purge_lazy == NULL) {
+		return true;
+	}
+
+	if (*r_extent_hooks != &extent_hooks_default) {
+		assert(!tsdn_null(tsdn));
+		pre_reentrancy(tsdn_tsd(tsdn));
+	}
+	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
 	    extent_base_get(extent), extent_size_get(extent), offset, length,
-	    arena_ind_get(arena)));
+	    arena_ind_get(arena));
+	if (*r_extent_hooks != &extent_hooks_default) {
+		post_reentrancy(tsdn_tsd(tsdn));
+	}
+
+	return err;
 }
 
 bool
@@ -1699,10 +1745,21 @@ extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	return ((*r_extent_hooks)->purge_forced == NULL ||
-	    (*r_extent_hooks)->purge_forced(*r_extent_hooks,
+
+	if ((*r_extent_hooks)->purge_forced == NULL) {
+		return true;
+	}
+	if (*r_extent_hooks != &extent_hooks_default) {
+		assert(!tsdn_null(tsdn));
+		pre_reentrancy(tsdn_tsd(tsdn));
+	}
+	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
 	    extent_base_get(extent), extent_size_get(extent), offset, length,
-	    arena_ind_get(arena)));
+	    arena_ind_get(arena));
+	if (*r_extent_hooks != &extent_hooks_default) {
+		post_reentrancy(tsdn_tsd(tsdn));
+	}
+	return err;
 }
 
 bool
@@ -1771,9 +1828,17 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
 
 	extent_lock2(tsdn, extent, trail);
 
-	if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
+	if (*r_extent_hooks != &extent_hooks_default) {
+		assert(!tsdn_null(tsdn));
+		pre_reentrancy(tsdn_tsd(tsdn));
+	}
+	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
 	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
-	    arena_ind_get(arena))) {
+	    arena_ind_get(arena));
+	if (*r_extent_hooks != &extent_hooks_default) {
+		post_reentrancy(tsdn_tsd(tsdn));
+	}
+	if (err) {
 		goto label_error_c;
 	}
 
@@ -1843,10 +1908,13 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
 		err = extent_merge_default_impl(extent_base_get(a),
 		    extent_base_get(b));
 	} else {
+		assert(!tsdn_null(tsdn));
+		pre_reentrancy(tsdn_tsd(tsdn));
 		err = (*r_extent_hooks)->merge(*r_extent_hooks,
 		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
 		    extent_size_get(b), extent_committed_get(a),
 		    arena_ind_get(arena));
+		post_reentrancy(tsdn_tsd(tsdn));
 	}
 
 	if (err) {
diff --git a/test/unit/base.c b/test/unit/base.c
index 5dc42f0a..7fa24ac0 100644
--- a/test/unit/base.c
+++ b/test/unit/base.c
@@ -27,11 +27,10 @@ static extent_hooks_t hooks_not_null = {
 };
 
 TEST_BEGIN(test_base_hooks_default) {
-	tsdn_t *tsdn;
 	base_t *base;
 	size_t allocated0, allocated1, resident, mapped;
 
-	tsdn = tsdn_fetch();
+	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 	base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
 
 	if (config_stats) {
@@ -49,13 +48,12 @@ TEST_BEGIN(test_base_hooks_default) {
 	    "At least 42 bytes were allocated by base_alloc()");
 	}
 
-	base_delete(base);
+	base_delete(tsdn, base);
 }
 TEST_END
 
 TEST_BEGIN(test_base_hooks_null) {
 	extent_hooks_t hooks_orig;
-	tsdn_t *tsdn;
 	base_t *base;
 	size_t allocated0, allocated1, resident, mapped;
 
@@ -68,7 +66,7 @@ TEST_BEGIN(test_base_hooks_null) {
 	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
 	memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
 
-	tsdn = tsdn_fetch();
+	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
 	base = base_new(tsdn, 0, &hooks);
 	assert_ptr_not_null(base, "Unexpected base_new() failure");
 
@@ -87,7 +85,7
@@ TEST_BEGIN(test_base_hooks_null) { "At least 42 bytes were allocated by base_alloc()"); } - base_delete(base); + base_delete(tsdn, base); memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); } @@ -95,7 +93,6 @@ TEST_END TEST_BEGIN(test_base_hooks_not_null) { extent_hooks_t hooks_orig; - tsdn_t *tsdn; base_t *base; void *p, *q, *r, *r_exp; @@ -108,7 +105,7 @@ TEST_BEGIN(test_base_hooks_not_null) { memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); - tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); did_alloc = false; base = base_new(tsdn, 0, &hooks); assert_ptr_not_null(base, "Unexpected base_new() failure"); @@ -200,7 +197,7 @@ TEST_BEGIN(test_base_hooks_not_null) { called_dalloc = called_destroy = called_decommit = called_purge_lazy = called_purge_forced = false; - base_delete(base); + base_delete(tsdn, base); assert_true(called_dalloc, "Expected dalloc call"); assert_true(!called_destroy, "Unexpected destroy call"); assert_true(called_decommit, "Expected decommit call"); From 425463a4465043f5f1ccb7f4b257e31ad95b1ed6 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 22 Jun 2017 16:18:30 -0700 Subject: [PATCH 13/19] Check arena in current context in pre_reentrancy. --- .../internal/jemalloc_internal_inlines_a.h | 5 +- src/arena.c | 2 +- src/background_thread.c | 6 +- src/base.c | 13 ++-- src/extent.c | 68 +++++++++---------- src/jemalloc.c | 2 +- src/prof.c | 2 +- 7 files changed, 51 insertions(+), 47 deletions(-) diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/include/jemalloc/internal/jemalloc_internal_inlines_a.h index 854fb1e2..24ea4162 100644 --- a/include/jemalloc/internal/jemalloc_internal_inlines_a.h +++ b/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -146,7 +146,10 @@ tcache_get(tsd_t *tsd) { } static inline void -pre_reentrancy(tsd_t *tsd) { +pre_reentrancy(tsd_t *tsd, arena_t *arena) { + /* arena is the current context. Reentry from a0 is not allowed. */ + assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); + bool fast = tsd_fast(tsd); ++*tsd_reentrancy_levelp_get(tsd); if (fast) { diff --git a/src/arena.c b/src/arena.c index 4e3bd6c1..b2830866 100644 --- a/src/arena.c +++ b/src/arena.c @@ -2051,7 +2051,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { * is done enough that we should have tsd. */ assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); if (hooks_arena_new_hook) { hooks_arena_new_hook(); } diff --git a/src/background_thread.c b/src/background_thread.c index ab076fee..cc5100d1 100644 --- a/src/background_thread.c +++ b/src/background_thread.c @@ -316,7 +316,7 @@ background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { &background_thread_lock); } - pre_reentrancy(tsd); + pre_reentrancy(tsd, NULL); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); bool has_thread; assert(info->state != background_thread_paused); @@ -408,7 +408,7 @@ label_restart: */ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); - pre_reentrancy(tsd); + pre_reentrancy(tsd, NULL); int err = background_thread_create_signals_masked(&info->thread, NULL, background_thread_entry, (void *)(uintptr_t)i); post_reentrancy(tsd); @@ -557,7 +557,7 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) { return false; } - pre_reentrancy(tsd); + pre_reentrancy(tsd, NULL); /* * To avoid complications (besides reentrancy), create internal * background threads with the underlying pthread_create. 
diff --git a/src/base.c b/src/base.c index 22c94338..97078b13 100644 --- a/src/base.c +++ b/src/base.c @@ -25,11 +25,12 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) if (extent_hooks == &extent_hooks_default) { addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE, &zero, &commit, ind); - post_reentrancy(tsdn_tsd(tsdn)); + post_reentrancy(tsd); } return addr; @@ -64,8 +65,8 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, /* Nothing worked. This should never happen. */ not_reached(); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); if (extent_hooks->dalloc != NULL && !extent_hooks->dalloc(extent_hooks, addr, size, true, ind)) { @@ -88,7 +89,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, } /* Nothing worked. That's the application's problem. */ label_done: - post_reentrancy(tsdn_tsd(tsdn)); + post_reentrancy(tsd); return; } } diff --git a/src/extent.c b/src/extent.c index 4b66dd88..fa45c84d 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1025,6 +1025,18 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, alignment, zero, commit); } +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, arena); +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? 
tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + /* * If virtual memory is retained, create increasingly larger extents from which * to split requested extents in order to limit the total number of disjoint @@ -1073,12 +1085,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, &zeroed, &committed, (dss_prec_t)atomic_load_u( &arena->dss_prec, ATOMIC_RELAXED)); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, alloc_size, PAGE, &zeroed, &committed, arena_ind_get(arena)); - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } extent_init(extent, arena, ptr, alloc_size, false, NSIZES, @@ -1250,11 +1261,10 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, alignment, zero, commit); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, esize, alignment, zero, commit, arena_ind_get(arena)); - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } if (addr == NULL) { extent_dalloc(tsdn, arena, extent); @@ -1492,13 +1502,12 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, err = extent_dalloc_default_impl(extent_base_get(extent), extent_size_get(extent)); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); err = ((*r_extent_hooks)->dalloc == NULL || (*r_extent_hooks)->dalloc(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena))); - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } if (!err) { @@ -1525,8 +1534,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_reregister(tsdn, extent); if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } /* Try to decommit; purge if that fails. 
*/ bool zeroed; @@ -1550,7 +1558,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, zeroed = false; } if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } extent_zeroed_set(extent, zeroed); @@ -1595,12 +1603,11 @@ extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_destroy_default_impl(extent_base_get(extent), extent_size_get(extent)); } else if ((*r_extent_hooks)->destroy != NULL) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); (*r_extent_hooks)->destroy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), extent_committed_get(extent), arena_ind_get(arena)); - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } extent_dalloc(tsdn, arena, extent); @@ -1622,14 +1629,13 @@ extent_commit_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->commit == NULL || (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) || !err); return err; @@ -1660,15 +1666,14 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } bool err = ((*r_extent_hooks)->decommit == NULL || (*r_extent_hooks)->decommit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } extent_committed_set(extent, extent_committed_get(extent) && err); return err; @@ -1700,16 +1705,14 @@ extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, if ((*r_extent_hooks)->purge_lazy == NULL) { return true; } - if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } return err; @@ -1750,14 +1753,13 @@ extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, return true; } if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } return err; } @@ -1829,14 +1831,13 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_lock2(tsdn, extent, trail); if (*r_extent_hooks != &extent_hooks_default) { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); } bool 
err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), size_a + size_b, size_a, size_b, extent_committed_get(extent), arena_ind_get(arena)); if (*r_extent_hooks != &extent_hooks_default) { - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } if (err) { goto label_error_c; @@ -1908,13 +1909,12 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, err = extent_merge_default_impl(extent_base_get(a), extent_base_get(b)); } else { - assert(!tsdn_null(tsdn)); - pre_reentrancy(tsdn_tsd(tsdn)); + extent_hook_pre_reentrancy(tsdn, arena); err = (*r_extent_hooks)->merge(*r_extent_hooks, extent_base_get(a), extent_size_get(a), extent_base_get(b), extent_size_get(b), extent_committed_get(a), arena_ind_get(arena)); - post_reentrancy(tsdn_tsd(tsdn)); + extent_hook_post_reentrancy(tsdn); } if (err) { diff --git a/src/jemalloc.c b/src/jemalloc.c index aa79633c..7bf23105 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1476,7 +1476,7 @@ malloc_init_hard(void) { malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); /* Set reentrancy level to 1 during init. */ - pre_reentrancy(tsd); + pre_reentrancy(tsd, NULL); /* Initialize narenas before prof_boot2 (for allocation). */ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) diff --git a/src/prof.c b/src/prof.c index 61dfa2ce..975722c4 100644 --- a/src/prof.c +++ b/src/prof.c @@ -1633,7 +1633,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, return true; } - pre_reentrancy(tsd); + pre_reentrancy(tsd, NULL); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); prof_gctx_tree_t gctxs; From 57beeb2fcb14210bf25e5a79c317e135392cfd86 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 22 Jun 2017 18:58:40 -0700 Subject: [PATCH 14/19] Switch ctl to explicitly use tsd instead of tsdn. 
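[Illustration (not part of this patch): the tsd/tsdn relationship this change leans on, sketched with jemalloc-internal types and therefore not compilable standalone. tsdn_t is a possibly-NULL view of tsd_t, so tsdn-taking code must recover tsd defensively; taking tsd_t * makes the non-NULL requirement explicit, and tsd_tsdn() converts back cheaply:]

/* Old style: every tsdn consumer re-derives tsd. */
static int
nametomib_compat(tsdn_t *tsdn, const char *name, size_t *mibp,
    size_t *miblenp) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	/* New style: pass tsd explicitly; tsd_tsdn(tsd) is never NULL. */
	return ctl_nametomib(tsd, name, mibp, miblenp);
}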
---
 include/jemalloc/internal/ctl.h |  3 +--
 src/ctl.c                       | 38 ++++++++++++++++-----------------
 src/jemalloc.c                  |  9 ++++----
 3 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index f159383d..a91c4cf5 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -91,8 +91,7 @@ typedef struct ctl_arenas_s {
 
 int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
-int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
-    size_t *miblenp);
+int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
 
 int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen);
diff --git a/src/ctl.c b/src/ctl.c
index a647c6c0..36bc8fb5 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -622,7 +622,7 @@ arenas_i2a(size_t i) {
 }
 
 static ctl_arena_t *
-arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
+arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
 	ctl_arena_t *ret;
 
 	assert(!compat || !init);
@@ -635,15 +635,15 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
 			ctl_arena_stats_t astats;
 		};
 		struct container_s *cont =
-		    (struct container_s *)base_alloc(tsdn, b0get(),
-		    sizeof(struct container_s), QUANTUM);
+		    (struct container_s *)base_alloc(tsd_tsdn(tsd),
+		    b0get(), sizeof(struct container_s), QUANTUM);
 		if (cont == NULL) {
 			return NULL;
 		}
 		ret = &cont->ctl_arena;
 		ret->astats = &cont->astats;
 	} else {
-		ret = (ctl_arena_t *)base_alloc(tsdn, b0get(),
+		ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
 		    sizeof(ctl_arena_t), QUANTUM);
 		if (ret == NULL) {
 			return NULL;
@@ -659,7 +659,7 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
 
 static ctl_arena_t *
 arenas_i(size_t i) {
-	ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
+	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
 	assert(ret != NULL);
 	return ret;
 }
@@ -863,7 +863,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
 }
 
 static unsigned
-ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
+ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
 	unsigned arena_ind;
 	ctl_arena_t *ctl_arena;
 
@@ -876,12 +876,12 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
 	}
 
 	/* Trigger stats allocation. */
-	if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) {
+	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
 		return UINT_MAX;
 	}
 
 	/* Initialize new arena. */
-	if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) {
+	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
 		return UINT_MAX;
 	}
 
@@ -975,8 +975,9 @@ ctl_refresh(tsdn_t *tsdn) {
 }
 
 static bool
-ctl_init(tsdn_t *tsdn) {
+ctl_init(tsd_t *tsd) {
 	bool ret;
+	tsdn_t *tsdn = tsd_tsdn(tsd);
 
 	malloc_mutex_lock(tsdn, &ctl_mtx);
 	if (!ctl_initialized) {
@@ -1010,14 +1011,14 @@ ctl_init(tsdn_t *tsdn) {
 		 * here rather than doing it lazily elsewhere, in order
 		 * to limit when OOM-caused errors can occur.
 		 */
-		if ((ctl_sarena = arenas_i_impl(tsdn, MALLCTL_ARENAS_ALL, false,
+		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
 		    true)) == NULL) {
 			ret = true;
 			goto label_return;
 		}
 		ctl_sarena->initialized = true;
 
-		if ((ctl_darena = arenas_i_impl(tsdn, MALLCTL_ARENAS_DESTROYED,
+		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
 		    false, true)) == NULL) {
 			ret = true;
 			goto label_return;
@@ -1031,7 +1032,7 @@ ctl_init(tsdn_t *tsdn) {
 		ctl_arenas->narenas = narenas_total_get();
 		for (i = 0; i < ctl_arenas->narenas; i++) {
-			if (arenas_i_impl(tsdn, i, false, true) == NULL) {
+			if (arenas_i_impl(tsd, i, false, true) == NULL) {
 				ret = true;
 				goto label_return;
 			}
@@ -1156,7 +1157,7 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
 	size_t mib[CTL_MAX_DEPTH];
 	const ctl_named_node_t *node;
 
-	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+	if (!ctl_initialized && ctl_init(tsd)) {
 		ret = EAGAIN;
 		goto label_return;
 	}
@@ -1180,15 +1181,15 @@ label_return:
 }
 
 int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) {
+ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
 	int ret;
 
-	if (!ctl_initialized && ctl_init(tsdn)) {
+	if (!ctl_initialized && ctl_init(tsd)) {
 		ret = EAGAIN;
 		goto label_return;
 	}
 
-	ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
 label_return:
 	return(ret);
 }
@@ -1200,7 +1201,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	const ctl_named_node_t *node;
 	size_t i;
 
-	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+	if (!ctl_initialized && ctl_init(tsd)) {
 		ret = EAGAIN;
 		goto label_return;
 	}
@@ -2312,8 +2313,7 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
 	WRITE(extent_hooks, extent_hooks_t *);
 
-	if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
-	    UINT_MAX) {
+	if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
 		ret = EAGAIN;
 		goto label_return;
 	}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 7bf23105..511710cc 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2918,16 +2918,15 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
 
 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
 	int ret;
-	tsdn_t *tsdn;
 
 	if (unlikely(malloc_init())) {
 		return EAGAIN;
 	}
-	tsdn = tsdn_fetch();
-	check_entry_exit_locking(tsdn);
-	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
-	check_entry_exit_locking(tsdn);
+	tsd_t *tsd = tsd_fetch();
+	check_entry_exit_locking(tsd_tsdn(tsd));
+	ret = ctl_nametomib(tsd, name, mibp, miblenp);
+	check_entry_exit_locking(tsd_tsdn(tsd));
 
 	return ret;
 }

From aa363f9388685a96a0af12b6f4a6dfa20d4243f9 Mon Sep 17 00:00:00 2001
From: Qi Wang
Date: Mon, 26 Jun 2017 11:17:45 -0700
Subject: [PATCH 15/19] Fix pthread_sigmask() usage to block all signals.
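
A new thread inherits its creator's signal mask, so blocking every
signal around pthread_create() guarantees the background thread starts
fully masked; the previous sigemptyset() built a mask that blocked
nothing.  A stand-alone sketch of the pattern (jemalloc's version also
checks the pthread_sigmask() return codes):

    #include <pthread.h>
    #include <signal.h>

    static int
    create_thread_signals_masked(pthread_t *thread, void *(*fn)(void *),
        void *arg) {
        sigset_t set, oldset;
        sigfillset(&set);       /* all signals, not none */
        pthread_sigmask(SIG_SETMASK, &set, &oldset);
        int err = pthread_create(thread, NULL, fn, arg);
        /* Restore the creator's mask whether or not create failed. */
        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
        return err;
    }

    static void *
    noop(void *arg) {
        return arg;
    }

    int
    main(void) {
        pthread_t t;
        if (create_thread_signals_masked(&t, noop, NULL) == 0) {
            pthread_join(t, NULL);
        }
        return 0;
    }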
---
 src/background_thread.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/background_thread.c b/src/background_thread.c
index cc5100d1..eb30eb5b 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -355,7 +355,7 @@ background_thread_create_signals_masked(pthread_t *thread,
 	 * an empty signal set.
 	 */
 	sigset_t set;
-	sigemptyset(&set);
+	sigfillset(&set);
 	sigset_t oldset;
 	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
 	if (mask_err != 0) {

From c99e570a48236ba771b61af31febaef6f8b7e887 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Fri, 23 Jun 2017 12:35:17 -0700
Subject: [PATCH 16/19] Make sure LG_PAGE <= LG_HUGEPAGE.

This resolves #883.
---
 configure.ac | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/configure.ac b/configure.ac
index 770fff56..496631a6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1373,6 +1373,10 @@ if test "x${je_cv_lg_hugepage}" = "x" ; then
     je_cv_lg_hugepage=21
   fi
 fi
+if test "x${LG_PAGE}" != "xundefined" -a \
+   "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
+  AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})])
+fi
 AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}])
 
 AC_ARG_WITH([lg_page_sizes],

From 2b31cf5bd272216e4b20c1463bb696b4c1e9a8e5 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Thu, 29 Jun 2017 13:56:22 -0700
Subject: [PATCH 17/19] Enforce minimum autoconf version (currently 2.68).

This resolves #912.
---
 configure.ac | 1 +
 1 file changed, 1 insertion(+)

diff --git a/configure.ac b/configure.ac
index 496631a6..1551ded8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,5 @@
 dnl Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.68)
 AC_INIT([Makefile.in])
 AC_CONFIG_AUX_DIR([build-aux])
 

From cb032781bdfd778325284472c25172713414023f Mon Sep 17 00:00:00 2001
From: Qi Wang
Date: Thu, 29 Jun 2017 16:01:35 -0700
Subject: [PATCH 18/19] Add extent_grow_mtx in pre_ / post_fork handlers.

This fixes an issue that could cause the child process to get stuck
after fork(2).
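
After fork(2) the child contains only the forking thread, so any mutex
held by another thread at fork time stays locked forever unless the
pre-/post-fork handlers cover it; extent_grow_mtx was missing from that
set.  A stand-alone sketch of the discipline, with a plain pthread
mutex standing in for jemalloc's malloc_mutex_t:

    #include <pthread.h>

    /* Hypothetical stand-in for arena->extent_grow_mtx. */
    static pthread_mutex_t grow_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    prefork(void) {
        /* Acquire in the global lock order, so fork() runs with the
         * forking thread owning every covered mutex. */
        pthread_mutex_lock(&grow_mtx);
    }

    static void
    postfork_parent(void) {
        pthread_mutex_unlock(&grow_mtx);
    }

    static void
    postfork_child(void) {
        /* The child re-initializes rather than unlocks, discarding any
         * ownership state inherited from threads that no longer exist. */
        pthread_mutex_init(&grow_mtx, NULL);
    }

    int
    main(void) {
        /* One-time registration, mirroring jemalloc's _malloc_prefork /
         * _malloc_postfork handlers. */
        return pthread_atfork(prefork, postfork_parent, postfork_child);
    }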
---
 include/jemalloc/internal/arena_externs.h |  1 +
 src/arena.c                               | 15 +++++++++++----
 src/jemalloc.c                            |  5 ++++-
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 3a85bcbb..af16d158 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -90,6 +90,7 @@ void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
 void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
 void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
diff --git a/src/arena.c b/src/arena.c
index b2830866..632fce52 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2087,28 +2087,33 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+}
+
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
 	extents_prefork(tsdn, &arena->extents_dirty);
 	extents_prefork(tsdn, &arena->extents_muzzy);
 	extents_prefork(tsdn, &arena->extents_retained);
 }
 
 void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
 }
 
 void
-arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
 	base_prefork(tsdn, arena->base);
 }
 
 void
-arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_prefork(tsdn, &arena->large_mtx);
 }
 
 void
-arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
 	for (unsigned i = 0; i < NBINS; i++) {
 		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
 	}
@@ -2127,6 +2132,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 	extents_postfork_parent(tsdn, &arena->extents_dirty);
 	extents_postfork_parent(tsdn, &arena->extents_muzzy);
 	extents_postfork_parent(tsdn, &arena->extents_retained);
+	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {
@@ -2164,6 +2170,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	extents_postfork_child(tsdn, &arena->extents_dirty);
 	extents_postfork_child(tsdn, &arena->extents_muzzy);
 	extents_postfork_child(tsdn, &arena->extents_retained);
+	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 511710cc..0ee8ad48 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -3049,7 +3049,7 @@ _malloc_prefork(void)
 		background_thread_prefork1(tsd_tsdn(tsd));
 	}
 	/* Break arena prefork into stages to preserve lock order. */
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < 8; i++) {
 		for (j = 0; j < narenas; j++) {
 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
 			    NULL) {
@@ -3075,6 +3075,9 @@ _malloc_prefork(void)
 			case 6:
 				arena_prefork6(tsd_tsdn(tsd), arena);
 				break;
+			case 7:
+				arena_prefork7(tsd_tsdn(tsd), arena);
+				break;
 			default: not_reached();
 			}
 		}

From 284edf02b0de3231357497cf0367f6f64ab07cd8 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Sat, 1 Jul 2017 17:12:05 -0700
Subject: [PATCH 19/19] Update ChangeLog for 5.0.1.

---
 ChangeLog | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 98c12f20..ee1b7ead 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -4,6 +4,41 @@ brevity.  Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 5.0.1 (July 1, 2017)
+
+  This bugfix release fixes several issues, most of which are obscure enough
+  that typical applications are not impacted.
+
+  Bug fixes:
+  - Update decay->nunpurged before purging, in order to avoid potential update
+    races and subsequent incorrect purging volume.  (@interwq)
+  - Only abort on dlsym(3) error if the failure impacts an enabled feature
+    (lazy locking and/or background threads).  This mitigates an
+    initialization failure bug for which we still do not have a clear
+    reproduction test case.  (@interwq)
+  - Modify tsd management so that it neither crashes nor leaks if a thread's
+    only allocation activity is to call free() after TLS destructors have
+    been executed.  This behavior was observed when operating with GNU libc,
+    and is unlikely to be an issue with other libc implementations.
+    (@interwq)
+  - Mask signals during background thread creation.  This prevents signals
+    from being inadvertently delivered to background threads.  (@jasone,
+    @davidgoldblatt, @interwq)
+  - Avoid inactivity checks within background threads, in order to prevent
+    recursive mutex acquisition.  (@interwq)
+  - Fix extent_grow_retained() to use the specified hooks when the
+    arena.<i>.extent_hooks mallctl is used to override the default hooks.
+    (@interwq)
+  - Add missing reentrancy support for custom extent hooks which allocate.
+    (@interwq)
+  - Post-fork(2), re-initialize the list of tcaches associated with each
+    arena to contain no tcaches except the forking thread's.  (@interwq)
+  - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx.
+    This fixes potential deadlocks after fork(2).  (@interwq)
+  - Enforce minimum autoconf version (currently 2.68), since 2.63 is known
+    to generate corrupt configure scripts.  (@jasone)
+  - Ensure that the configured page size (--with-lg-page) is no larger than
+    the configured huge page size (--with-lg-hugepage).  (@jasone)
+
 * 5.0.0 (June 13, 2017)
 
   Unlike all previous jemalloc releases, this release does not use naturally