Avoid taking large_mtx for auto arenas.

On the tcache flush path, we can avoid touching large_mtx for auto arenas,
since it is only needed for manual arenas, where arena_reset is allowed.
Author: Qi Wang (committed by Qi Wang)
Date:   2018-05-29 15:55:04 -07:00
Commit: c834912aa9
Parent: 9bd8deb260
2 changed files with 9 additions and 3 deletions
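
Before the diffs, a self-contained sketch of the locking pattern this commit introduces. The names arena_is_auto, large_mtx, and lock_large are taken from the diff below; the struct layout, the pthread mutex, and the flush_one_large helper are simplified stand-ins invented for illustration, not jemalloc's internals:

```c
/* Illustration only -- not jemalloc source. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct arena_s {
	bool is_auto;              /* auto arenas can never be arena_reset() */
	pthread_mutex_t large_mtx; /* guards the list of live large extents */
	int nlarge;                /* stand-in for the arena->large list */
} arena_t;

static bool
arena_is_auto(arena_t *arena) {
	return arena->is_auto;
}

/* Analogue of the tcache flush change: take large_mtx, and maintain the
 * live-large list, only for manual arenas. */
static void
flush_one_large(arena_t *arena) {
	bool lock_large = !arena_is_auto(arena);
	if (lock_large) {
		pthread_mutex_lock(&arena->large_mtx);
		arena->nlarge--; /* analogue of extent_list_remove() */
		pthread_mutex_unlock(&arena->large_mtx);
	}
	/* ... actually deallocate the extent (both arena kinds) ... */
}

int
main(void) {
	/* Auto arenas never put large extents on the list to begin with. */
	arena_t manual = {false, PTHREAD_MUTEX_INITIALIZER, 1};
	arena_t automatic = {true, PTHREAD_MUTEX_INITIALIZER, 0};
	flush_one_large(&manual);    /* locks and unlinks */
	flush_one_large(&automatic); /* skips large_mtx entirely */
	printf("manual.nlarge=%d auto.nlarge=%d\n", manual.nlarge,
	    automatic.nlarge);
	return 0;
}
```

The point of the guard: manual arenas must keep their live-large list consistent because arena_reset can walk it at any time; auto arenas never reach that path, so they skip both the bookkeeping and the mutex.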

src/large.c

@@ -329,8 +329,9 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 		large_dalloc_maybe_junk(extent_addr_get(extent),
 		    extent_usize_get(extent));
 	} else {
-		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
+		/* Only hold the large_mtx if necessary. */
 		if (!arena_is_auto(arena)) {
+			malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
 			extent_list_remove(&arena->large, extent);
 		}
 	}
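
The else branch above is the junked_locked path reached from tcache flush. What large_mtx ultimately protects is arena_reset, which is only permitted on manual arenas: it drains the arena's live-large list and frees everything on it. Continuing the stand-in types from the sketch above (hypothetical helper, not jemalloc's arena_reset):

```c
/* Why manual arenas must keep the list and lock consistent: a reset can
 * run concurrently with flushes, and it walks the same list. */
static void
arena_reset_sketch(arena_t *arena) {
	/* arena_reset() is rejected for auto arenas in the real code. */
	if (arena_is_auto(arena)) {
		return;
	}
	pthread_mutex_lock(&arena->large_mtx);
	while (arena->nlarge > 0) {
		arena->nlarge--; /* free each live large allocation */
	}
	pthread_mutex_unlock(&arena->large_mtx);
}
```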

src/tcache.c

@@ -212,7 +212,10 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
 			idump = false;
 		}
 
-		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		bool lock_large = !arena_is_auto(locked_arena);
+		if (lock_large) {
+			malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		}
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
@@ -236,7 +239,9 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
 				tbin->tstats.nrequests = 0;
 			}
 		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		if (lock_large) {
+			malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		}
 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {