Drop high rank locks when creating threads.
Avoid holding arenas_lock and background_thread_lock when creating background threads: pthread_create may take internal locks of its own, and could otherwise deadlock against jemalloc's internal locks.
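The failure mode being avoided is a lock-order inversion across libraries: pthread_create() typically allocates (thread stack, TLS bookkeeping) and may take libc-internal locks while doing so; if that allocation recurses into jemalloc while the creating thread already holds a jemalloc mutex, the two lock hierarchies can interleave and deadlock. A minimal sketch of the resulting rule, never call pthread_create() with an allocator-internal mutex held; all names below are illustrative, not jemalloc's:

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
worker(void *arg) {
    return arg;
}

/* Hypothetical helper: stage state under the lock, but drop it across
 * pthread_create(), which may malloc() and take libc-internal locks. */
static int
spawn_worker(pthread_t *thd) {
    pthread_mutex_lock(&state_lock);
    /* ... reserve bookkeeping for the new thread ... */
    pthread_mutex_unlock(&state_lock);  /* drop before creating */

    int err = pthread_create(thd, NULL, worker, NULL);

    pthread_mutex_lock(&state_lock);
    /* ... publish or roll back; state may have changed meanwhile ... */
    pthread_mutex_unlock(&state_lock);
    return err;
}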
parent 00869e39a3
commit 73713fbb27
include/jemalloc/internal/arena_externs.h
@@ -15,6 +15,7 @@ extern percpu_arena_mode_t opt_percpu_arena;
 extern const char *percpu_arena_mode_names[];
 
 extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
+extern malloc_mutex_t arenas_lock;
 
 void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
     szind_t szind, uint64_t nrequests);
src/arena.c
@@ -2050,17 +2050,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
             hooks_arena_new_hook();
         }
         post_reentrancy(tsdn_tsd(tsdn));
-
-        /* background_thread_create() handles reentrancy internally. */
-        if (have_background_thread) {
-            bool err;
-            malloc_mutex_lock(tsdn, &background_thread_lock);
-            err = background_thread_create(tsdn_tsd(tsdn), ind);
-            malloc_mutex_unlock(tsdn, &background_thread_lock);
-            if (err) {
-                goto label_error;
-            }
-        }
     }
 
     return arena;
src/background_thread.c
@@ -352,12 +352,15 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
     }
 
     pre_reentrancy(tsd);
+    malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
     /*
      * To avoid complications (besides reentrancy), create internal
-     * background threads with the underlying pthread_create.
+     * background threads with the underlying pthread_create, and drop
+     * background_thread_lock (pthread_create may take internal locks).
      */
     int err = pthread_create_wrapper(&info->thread, NULL,
         background_thread_entry, (void *)thread_ind);
+    malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
     post_reentrancy(tsd);
 
     if (err != 0) {
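Note the shape of the background_thread_create() change above: background_thread_lock is released just before pthread_create_wrapper() and reacquired right after, with pre_reentrancy()/post_reentrancy() still bracketing the window. Dropping a lock mid-function means any state read before the drop may be stale afterwards, so callers need some other serialization (the ctl_mtx change below provides it on the mallctl path). A generic, hypothetical sketch of the idiom:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool started = false;    /* guarded by lock */

static void *
entry(void *arg) {
    return arg;
}

/*
 * Start the worker without holding `lock` across pthread_create().
 * Callers must already be serialized by an outer lock (compare ctl_mtx
 * below); otherwise two callers could both pass the !started check.
 */
static int
start_worker(pthread_t *thd) {
    pthread_mutex_lock(&lock);
    bool already = started;
    pthread_mutex_unlock(&lock);    /* drop around the blocking call */
    if (already) {
        return 0;
    }

    int err = pthread_create(thd, NULL, entry, NULL);

    pthread_mutex_lock(&lock);
    if (err == 0) {
        started = true;
    }
    pthread_mutex_unlock(&lock);
    return err;
}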
src/ctl.c
@@ -1501,6 +1501,7 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
     }
     background_thread_ctl_init(tsd_tsdn(tsd));
 
+    malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
     malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
     if (newp == NULL) {
         oldval = background_thread_enabled();
@@ -1535,6 +1536,8 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
     ret = 0;
 label_return:
     malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+    malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
     return ret;
 }
 
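The two ctl.c hunks wrap the whole "background_thread" mallctl operation in ctl_mtx, taken before background_thread_lock and released after it. Plausibly this is what keeps concurrent mallctl callers serialized now that background_thread_create() drops background_thread_lock internally; it also fixes a single acquisition order (ctl_mtx, then background_thread_lock), which is what prevents ABBA deadlock. A toy illustration with hypothetical names:

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* cf. ctl_mtx */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* cf. background_thread_lock */

/* Safe: every code path acquires outer before inner. If any path
 * instead took inner then outer, thread 1 (holding outer, wanting
 * inner) and thread 2 (holding inner, wanting outer) could deadlock. */
static void
serialized_op(void (*body)(void)) {
    pthread_mutex_lock(&outer);
    pthread_mutex_lock(&inner);
    body();
    pthread_mutex_unlock(&inner);
    pthread_mutex_unlock(&outer);
}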
src/jemalloc.c
@@ -70,7 +70,7 @@ unsigned opt_narenas = 0;
 unsigned ncpus;
 
 /* Protects arenas initialization. */
-static malloc_mutex_t arenas_lock;
+malloc_mutex_t arenas_lock;
 /*
  * Arenas that are used to service external requests. Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
@@ -335,6 +335,25 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     return arena;
 }
 
+static void
+arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
+    if (ind == 0) {
+        return;
+    }
+    /* background_thread_create() handles reentrancy internally. */
+    if (have_background_thread) {
+        bool err;
+        malloc_mutex_lock(tsdn, &background_thread_lock);
+        err = background_thread_create(tsdn_tsd(tsdn), ind);
+        malloc_mutex_unlock(tsdn, &background_thread_lock);
+        if (err) {
+            malloc_printf("<jemalloc>: error in background thread "
+                "creation for arena %u. Abort.\n", ind);
+            abort();
+        }
+    }
+}
+
 arena_t *
 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     arena_t *arena;
@@ -342,6 +361,9 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     malloc_mutex_lock(tsdn, &arenas_lock);
     arena = arena_init_locked(tsdn, ind, extent_hooks);
     malloc_mutex_unlock(tsdn, &arenas_lock);
+
+    arena_new_create_background_thread(tsdn, ind);
+
     return arena;
 }
 
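After this refactor, arena_init() publishes the new arena under arenas_lock and spawns its background thread only after the unlock, so a freshly created arena is briefly visible without its helper thread; that is benign, since the background thread only performs asynchronous purging. For reference, a small program that exercises this path through jemalloc's public API (mallctl and the "arenas.create"/"background_thread" names are standard; error handling trimmed):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void) {
    bool bg = true;
    /* Enable background threads (runs background_thread_ctl above). */
    mallctl("background_thread", NULL, NULL, &bg, sizeof(bg));

    /* Create an arena; arena_init() then spawns its background
     * thread outside arenas_lock. */
    unsigned ind;
    size_t sz = sizeof(ind);
    if (mallctl("arenas.create", &ind, &sz, NULL, 0) == 0) {
        printf("created arena %u\n", ind);
    }
    return 0;
}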
@@ -475,6 +497,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
 
     if (narenas_auto > 1) {
         unsigned i, j, choose[2], first_null;
+        bool is_new_arena[2];
 
         /*
          * Determine binding for both non-internal and internal
@@ -486,6 +509,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
 
         for (j = 0; j < 2; j++) {
             choose[j] = 0;
+            is_new_arena[j] = false;
         }
 
         first_null = narenas_auto;
@@ -545,6 +569,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
                         &arenas_lock);
                     return NULL;
                 }
+                is_new_arena[j] = true;
                 if (!!j == internal) {
                     ret = arena;
                 }
@@ -552,6 +577,15 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
             arena_bind(tsd, choose[j], !!j);
         }
         malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+
+        for (j = 0; j < 2; j++) {
+            if (is_new_arena[j]) {
+                assert(choose[j] > 0);
+                arena_new_create_background_thread(
+                    tsd_tsdn(tsd), choose[j]);
+            }
+        }
+
     } else {
         ret = arena_get(tsd_tsdn(tsd), 0, false);
         arena_bind(tsd, 0, false);
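The arena_choose_hard() change is the same move applied in a second place: while arenas_lock is held, the function only records which arenas it created (is_new_arena[]); the background threads are spawned in a second pass after the unlock. This "collect under the lock, act after it" pattern is a standard way to keep blocking calls out of a critical section. A generic sketch with hypothetical names (create_slot, spawn_helper_for):

#include <pthread.h>
#include <stdbool.h>

#define NSLOTS 2

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for arena creation and thread spawning. */
static int  create_slot(int i) { (void)i; return 0; }
static void spawn_helper_for(int i) { (void)i; }

static void
create_slots(void) {
    bool is_new[NSLOTS] = { false };

    /* Pass 1: mutate the shared registry under the lock, remembering
     * which slots are new, but do not spawn any threads yet. */
    pthread_mutex_lock(&registry_lock);
    for (int i = 0; i < NSLOTS; i++) {
        if (create_slot(i) == 0) {
            is_new[i] = true;
        }
    }
    pthread_mutex_unlock(&registry_lock);

    /* Pass 2: spawn helper threads with no registry lock held, so
     * pthread_create's internal locks cannot deadlock against it. */
    for (int i = 0; i < NSLOTS; i++) {
        if (is_new[i]) {
            spawn_helper_for(i);
        }
    }
}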