Avoid creating background threads for the huge arena alone.

With low arena-count settings, the huge-threshold feature may trigger an unwanted background thread creation. Given that the huge arena purges eagerly by default, bypass background thread creation when initializing the huge arena.
parent b6f1f2669a
commit bbe8e6a909
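
For context, the scenario this fixes can be reproduced with a configuration along these lines (a sketch; the huge-threshold option name varies by version and was later exposed as opt.oversize_threshold):

    MALLOC_CONF="background_thread:true,narenas:1,oversize_threshold:8388608" ./app

With a single regular arena and background threads enabled, the first allocation above the threshold initializes the dedicated huge arena; before this change, that spawned an extra background thread even though the huge arena purges eagerly and has no decay work for it.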
include/jemalloc/internal/arena_externs.h

@@ -85,6 +85,7 @@ void arena_nthreads_dec(arena_t *arena, bool internal);
 size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
+bool arena_is_huge(unsigned arena_ind);
 arena_t *arena_choose_huge(tsd_t *tsd);
 bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
     unsigned *binshard);
src/arena.c

@@ -2127,6 +2127,14 @@ arena_init_huge(void) {
 	return huge_enabled;
 }
 
+bool
+arena_is_huge(unsigned arena_ind) {
+	if (huge_arena_ind == 0) {
+		return false;
+	}
+	return (arena_ind == huge_arena_ind);
+}
+
 void
 arena_boot(sc_data_t *sc_data) {
 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
src/background_thread.c

@@ -535,9 +535,8 @@ background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
 	n_background_threads++;
 }
 
-/* Create a new background thread if needed. */
-bool
-background_thread_create(tsd_t *tsd, unsigned arena_ind) {
+static bool
+background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
 	assert(have_background_thread);
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
 
@@ -590,6 +589,19 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
 	return false;
 }
 
+/* Create a new background thread if needed. */
+bool
+background_thread_create(tsd_t *tsd, unsigned arena_ind) {
+	assert(have_background_thread);
+
+	bool ret;
+	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+	ret = background_thread_create_locked(tsd, arena_ind);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+
+	return ret;
+}
+
 bool
 background_threads_enable(tsd_t *tsd) {
 	assert(n_background_threads == 0);
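
This split follows the usual locked/unlocked wrapper convention: callers that already hold background_thread_lock (such as background_threads_enable, updated in the next hunk) call the _locked variant directly, while the self-locking background_thread_create wrapper lets the ctl.c and jemalloc.c call sites below drop their manual lock/unlock boilerplate.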
@@ -623,7 +635,7 @@ background_threads_enable(tsd_t *tsd) {
 		}
 	}
 
-	return background_thread_create(tsd, 0);
+	return background_thread_create_locked(tsd, 0);
 }
 
 bool
src/ctl.c
@@ -2276,6 +2276,17 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
 		ret = EINVAL;
 		goto label_return;
 	}
+	if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
+		/*
+		 * By default the huge arena purges eagerly.  If it is
+		 * set to non-zero decay time afterwards, background
+		 * thread might be needed.
+		 */
+		if (background_thread_create(tsd, arena_ind)) {
+			ret = EFAULT;
+			goto label_return;
+		}
+	}
 	if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
 	    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
 	    arena, *(ssize_t *)newp)) {
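
To illustrate the path this hunk guards: re-enabling decay on the huge arena now creates its background thread on demand. A minimal sketch of such a caller, assuming jemalloc is linked and that arena_ind happens to be the huge arena's (internal) index:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Sketch: set an arena's dirty decay time through the mallctl
 * interface. A positive decay_ms on the huge arena now triggers
 * on-demand background thread creation; failure surfaces as EFAULT. */
static int
set_dirty_decay_ms(unsigned arena_ind, ssize_t decay_ms) {
	char name[64];
	snprintf(name, sizeof(name), "arena.%u.dirty_decay_ms", arena_ind);
	return mallctl(name, NULL, NULL, &decay_ms, sizeof(decay_ms));
}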
src/jemalloc.c

@@ -344,12 +344,12 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
 	if (ind == 0) {
 		return;
 	}
-	if (have_background_thread) {
-		bool err;
-		malloc_mutex_lock(tsdn, &background_thread_lock);
-		err = background_thread_create(tsdn_tsd(tsdn), ind);
-		malloc_mutex_unlock(tsdn, &background_thread_lock);
-		if (err) {
+	/*
+	 * Avoid creating a new background thread just for the huge arena, which
+	 * purges eagerly by default.
+	 */
+	if (have_background_thread && !arena_is_huge(ind)) {
+		if (background_thread_create(tsdn_tsd(tsdn), ind)) {
 			malloc_printf("<jemalloc>: error in background thread "
 			    "creation for arena %u. Abort.\n", ind);
 			abort();
@@ -1719,11 +1719,7 @@ malloc_init_hard(void) {
 		 * sets isthreaded) needs to be called without holding any lock.
 		 */
 		background_thread_ctl_init(tsd_tsdn(tsd));
-
-		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
-		bool err = background_thread_create(tsd, 0);
-		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
-		if (err) {
+		if (background_thread_create(tsd, 0)) {
 			return true;
 		}
 	}
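
One way to verify the effect (a sketch, assuming the background_thread and huge-threshold features are both enabled): after the huge arena is initialized, the background thread count should not have grown on its account.

#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Sketch: read the current number of background threads. With this
 * patch, initializing the huge arena no longer increments the count. */
static size_t
bg_thread_count(void) {
	uint64_t epoch = 1;
	size_t n = 0, sz = sizeof(n);
	/* Stats are cached; refresh them before reading. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	mallctl("stats.background_thread.num_threads", &n, &sz, NULL, 0);
	return n;
}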