Add minimal initialized TSD.

We use the minimal_initialized tsd (which requires no cleanup) specifically
for free(), if tsd hasn't been initialized yet.

Any other activity will transition the state from minimal to nominal.  This
works around the case where a thread makes no malloc calls during its
lifetime until thread termination, when free() happens after the TLS
destructors have run.
Author: Qi Wang
Date: 2017-06-15 16:53:22 -07:00
Committer: Qi Wang
Commit: 9b1befabbb (parent ae93fb08e2)
3 changed files with 60 additions and 24 deletions
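For illustration only, here is a minimal standalone sketch (not part of this commit; all names are invented) of the scenario the message describes: the exiting thread's only allocator call is a free() issued from an application TLS destructor, which may run after jemalloc's own TSD destructor, so a fully set-up tsd created at that point would never be destructed properly.

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t app_key;

/*
 * Runs at thread exit.  Destructor ordering across TLS keys is unspecified,
 * so this free() can execute after jemalloc's own TSD cleanup, and it may be
 * the very first allocator call this thread ever makes.
 */
static void
app_destructor(void *ptr) {
	free(ptr);
}

static void *
thread_main(void *arg) {
	/* Stash a pointer allocated by the main thread; never call malloc(). */
	pthread_setspecific(app_key, arg);
	return NULL;
}

int
main(void) {
	pthread_t thr;
	void *blob = malloc(64);

	pthread_key_create(&app_key, app_destructor);
	pthread_create(&thr, NULL, thread_main, blob);
	pthread_join(thr, NULL);
	pthread_key_delete(&app_key);
	return 0;
}

With the diffs below, that late free() fetches only a minimal tsd via tsd_fetch_min(), so nothing requiring cleanup gets set up on this path.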

src/jemalloc.c

@@ -2264,7 +2264,15 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW
 je_free(void *ptr) {
 	UTRACE(ptr, 0, 0);
 	if (likely(ptr != NULL)) {
-		tsd_t *tsd = tsd_fetch();
+		/*
+		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
+		 * based on only free() calls -- other activities trigger the
+		 * minimal to full transition.  This is because free() may
+		 * happen during thread shutdown after tls deallocation: if a
+		 * thread never had any malloc activities until then, a
+		 * fully-setup tsd won't be destructed properly.
+		 */
+		tsd_t *tsd = tsd_fetch_min();
 		check_entry_exit_locking(tsd_tsdn(tsd));
 		tcache_t *tcache;

src/tsd.c

@@ -87,7 +87,8 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
 static bool
 tsd_data_init_nocleanup(tsd_t *tsd) {
-	assert(tsd->state == tsd_state_reincarnated);
+	assert(tsd->state == tsd_state_reincarnated ||
+	    tsd->state == tsd_state_minimal_initialized);
 	/*
 	 * During reincarnation, there is no guarantee that the cleanup function
 	 * will be called (deallocation may happen after all tsd destructors).
@@ -103,15 +104,8 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
 }
 
 tsd_t *
-tsd_fetch_slow(tsd_t *tsd, bool internal) {
-	if (internal) {
-		/* For internal background threads use only. */
-		assert(tsd->state == tsd_state_uninitialized);
-		tsd->state = tsd_state_reincarnated;
-		tsd_set(tsd);
-		tsd_data_init_nocleanup(tsd);
-		return tsd;
-	}
+tsd_fetch_slow(tsd_t *tsd, bool minimal) {
 	assert(!tsd_fast(tsd));
 	if (tsd->state == tsd_state_nominal_slow) {
 		/* On slow path but no work needed. */
@@ -119,11 +113,28 @@ tsd_fetch_slow(tsd_t *tsd, bool internal) {
 		    tsd_reentrancy_level_get(tsd) > 0 ||
 		    *tsd_arenas_tdata_bypassp_get(tsd));
 	} else if (tsd->state == tsd_state_uninitialized) {
-		tsd->state = tsd_state_nominal;
-		tsd_slow_update(tsd);
-		/* Trigger cleanup handler registration. */
-		tsd_set(tsd);
-		tsd_data_init(tsd);
+		if (!minimal) {
+			tsd->state = tsd_state_nominal;
+			tsd_slow_update(tsd);
+			/* Trigger cleanup handler registration. */
+			tsd_set(tsd);
+			tsd_data_init(tsd);
+		} else {
+			tsd->state = tsd_state_minimal_initialized;
+			tsd_set(tsd);
+			tsd_data_init_nocleanup(tsd);
+		}
+	} else if (tsd->state == tsd_state_minimal_initialized) {
+		if (!minimal) {
+			/* Switch to fully initialized. */
+			tsd->state = tsd_state_nominal;
+			assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
+			(*tsd_reentrancy_levelp_get(tsd))--;
+			tsd_slow_update(tsd);
+			tsd_data_init(tsd);
+		} else {
+			assert_tsd_data_cleanup_done(tsd);
+		}
 	} else if (tsd->state == tsd_state_purgatory) {
 		tsd->state = tsd_state_reincarnated;
 		tsd_set(tsd);
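As a reading aid, here is a self-contained, simplified model (plain C, not jemalloc code; names are invented) of the state transitions implemented by tsd_fetch_slow() above: a minimal fetch leaves an uninitialized tsd in the minimal_initialized state, and any later non-minimal fetch promotes it to nominal, the only state that registers a cleanup handler.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum {
	MODEL_UNINITIALIZED,
	MODEL_MINIMAL_INITIALIZED,	/* No cleanup handler registered. */
	MODEL_NOMINAL			/* Fully set up; cleanup registered. */
} model_state_t;

/* Mirrors the uninitialized/minimal_initialized branches above. */
static model_state_t
model_fetch_slow(model_state_t state, bool minimal) {
	switch (state) {
	case MODEL_UNINITIALIZED:
		return minimal ? MODEL_MINIMAL_INITIALIZED : MODEL_NOMINAL;
	case MODEL_MINIMAL_INITIALIZED:
		/* Only a non-minimal fetch (anything but free()) upgrades. */
		return minimal ? MODEL_MINIMAL_INITIALIZED : MODEL_NOMINAL;
	case MODEL_NOMINAL:
	default:
		return MODEL_NOMINAL;
	}
}

int
main(void) {
	model_state_t s = MODEL_UNINITIALIZED;

	s = model_fetch_slow(s, true);	/* free() during shutdown. */
	assert(s == MODEL_MINIMAL_INITIALIZED);

	s = model_fetch_slow(s, false);	/* Any other allocator activity. */
	assert(s == MODEL_NOMINAL);

	puts("minimal -> nominal transition modeled");
	return 0;
}

The real code additionally decrements the reentrancy level taken by tsd_data_init_nocleanup() when promoting, and asserts that a still-minimal tsd has no data needing cleanup; those details are omitted from the model.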
@@ -197,6 +208,9 @@ tsd_cleanup(void *arg) {
 	case tsd_state_uninitialized:
 		/* Do nothing. */
 		break;
+	case tsd_state_minimal_initialized:
+		/* This implies the thread only did free() in its life time. */
+		/* Fall through. */
 	case tsd_state_reincarnated:
 		/*
 		 * Reincarnated means another destructor deallocated memory