Add internal tsd for background_thread.

This commit is contained in:
Qi Wang 2017-06-07 16:12:50 -07:00 committed by Qi Wang
parent 73713fbb27
commit 5642f03cae
3 changed files with 24 additions and 11 deletions

View File

@ -155,7 +155,7 @@ void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void); tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void); void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg); void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd); tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_slow_update(tsd_t *tsd); void tsd_slow_update(tsd_t *tsd);
/* /*
@ -250,7 +250,7 @@ tsd_fast(tsd_t *tsd) {
} }
JEMALLOC_ALWAYS_INLINE tsd_t * JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init) { tsd_fetch_impl(bool init, bool internal) {
tsd_t *tsd = tsd_get(init); tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL) { if (!init && tsd_get_allocates() && tsd == NULL) {
@ -259,7 +259,7 @@ tsd_fetch_impl(bool init) {
assert(tsd != NULL); assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) { if (unlikely(tsd->state != tsd_state_nominal)) {
return tsd_fetch_slow(tsd); return tsd_fetch_slow(tsd, internal);
} }
assert(tsd_fast(tsd)); assert(tsd_fast(tsd));
tsd_assert_fast(tsd); tsd_assert_fast(tsd);
@ -267,9 +267,14 @@ tsd_fetch_impl(bool init) {
return tsd; return tsd;
} }
/*
 * Fetch tsd for jemalloc-internal use (e.g. background threads).  Passes
 * init=true and internal=true to tsd_fetch_impl(); per the tsd_fetch_slow()
 * hunk below, the internal flag makes the slow path mark the tsd
 * reincarnated and initialize it without registering cleanup, avoiding the
 * side effects of a normal fetch (such as triggering new arena creation,
 * which would in turn spawn another background thread).
 */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
	return tsd_fetch_impl(true, true);
}
JEMALLOC_ALWAYS_INLINE tsd_t * JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) { tsd_fetch(void) {
return tsd_fetch_impl(true); return tsd_fetch_impl(true, false);
} }
static inline bool static inline bool
@ -283,7 +288,7 @@ tsdn_fetch(void) {
return NULL; return NULL;
} }
return tsd_tsdn(tsd_fetch_impl(false)); return tsd_tsdn(tsd_fetch_impl(false, false));
} }
JEMALLOC_ALWAYS_INLINE rtree_ctx_t * JEMALLOC_ALWAYS_INLINE rtree_ctx_t *

View File

@ -316,12 +316,11 @@ background_thread_entry(void *ind_arg) {
set_current_thread_affinity((int)thread_ind); set_current_thread_affinity((int)thread_ind);
} }
/* /*
* Start periodic background work. We avoid fetching tsd to keep the * Start periodic background work. We use internal tsd which avoids
* background thread "outside", since there may be side effects, for * side effects, for example triggering new arena creation (which in
* example triggering new arena creation (which in turn triggers * turn triggers another background thread creation).
* background thread creation).
*/ */
background_work(TSDN_NULL, thread_ind); background_work(tsd_tsdn(tsd_internal_fetch()), thread_ind);
assert(pthread_equal(pthread_self(), assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread)); background_thread_info[thread_ind].thread));

View File

@ -103,7 +103,16 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
} }
tsd_t * tsd_t *
tsd_fetch_slow(tsd_t *tsd) { tsd_fetch_slow(tsd_t *tsd, bool internal) {
if (internal) {
/* For internal background threads use only. */
assert(tsd->state == tsd_state_uninitialized);
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
tsd_data_init_nocleanup(tsd);
return tsd;
}
if (tsd->state == tsd_state_nominal_slow) { if (tsd->state == tsd_state_nominal_slow) {
/* On slow path but no work needed. */ /* On slow path but no work needed. */
assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||