Set reentrancy level to 1 during init.

This makes sure we go down the slow path with a0 during init.
This commit is contained in:
Qi Wang 2017-05-31 15:21:10 -07:00 committed by Qi Wang
parent 340071f0cf
commit 530c07a45b

View File

@ -1397,6 +1397,18 @@ malloc_init_hard_finish(void) {
return false; return false;
} }
/*
 * Release the init lock and, when the caller bumped the reentrancy level
 * for the duration of init, restore it.  Must be called with init_lock held.
 */
static void
malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
	malloc_mutex_assert_owner(tsdn, &init_lock);
	malloc_mutex_unlock(tsdn, &init_lock);
	if (!reentrancy_set) {
		return;
	}
	/* A raised reentrancy level implies a full (non-null) tsd. */
	assert(!tsdn_null(tsdn));
	tsd_t *tsd = tsdn_tsd(tsdn);
	assert(tsd_reentrancy_level_get(tsd) > 0);
	post_reentrancy(tsd);
}
static bool static bool
malloc_init_hard(void) { malloc_init_hard(void) {
tsd_t *tsd; tsd_t *tsd;
@ -1405,15 +1417,18 @@ malloc_init_hard(void) {
_init_init_lock(); _init_init_lock();
#endif #endif
malloc_mutex_lock(TSDN_NULL, &init_lock); malloc_mutex_lock(TSDN_NULL, &init_lock);
#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
malloc_init_hard_cleanup(tsdn, reentrancy); \
return ret;
if (!malloc_init_hard_needed()) { if (!malloc_init_hard_needed()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock); UNLOCK_RETURN(TSDN_NULL, false, false)
return false;
} }
if (malloc_init_state != malloc_init_a0_initialized && if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) { malloc_init_hard_a0_locked()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock); UNLOCK_RETURN(TSDN_NULL, true, false)
return true;
} }
malloc_mutex_unlock(TSDN_NULL, &init_lock); malloc_mutex_unlock(TSDN_NULL, &init_lock);
@ -1425,29 +1440,27 @@ malloc_init_hard(void) {
if (malloc_init_hard_recursible()) { if (malloc_init_hard_recursible()) {
return true; return true;
} }
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
/* Set reentrancy level to 1 during init. */
pre_reentrancy(tsd);
/* Initialize narenas before prof_boot2 (for allocation). */ /* Initialize narenas before prof_boot2 (for allocation). */
if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
return true;
} }
if (config_prof && prof_boot2(tsd)) { if (config_prof && prof_boot2(tsd)) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
return true;
} }
malloc_init_percpu(); malloc_init_percpu();
if (malloc_init_hard_finish()) { if (malloc_init_hard_finish()) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
return true;
} }
post_reentrancy(tsd);
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_tsd_boot1();
malloc_tsd_boot1();
/* Update TSD after tsd_boot1. */ /* Update TSD after tsd_boot1. */
tsd = tsd_fetch(); tsd = tsd_fetch();
if (opt_background_thread) { if (opt_background_thread) {
@ -1463,7 +1476,7 @@ malloc_init_hard(void) {
return true; return true;
} }
} }
#undef UNLOCK_RETURN
return false; return false;
} }