Synchronize arena->tcache_ql with arena->tcache_ql_mtx.

This replaces arena->lock synchronization.
This commit is contained in:
Jason Evans 2017-02-12 18:50:53 -08:00
parent 6b5cba4191
commit ab25d3c987
5 changed files with 31 additions and 21 deletions

View File

@@ -135,9 +135,10 @@ struct arena_s {
* Stats from these are merged incrementally, and at exit if
* opt_stats_print is enabled.
*
* Synchronization: lock.
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
prof_accum_t prof_accum;

View File

@@ -35,13 +35,14 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
#define WITNESS_RANK_CORE 9U
#define WITNESS_RANK_ARENA 9U
#define WITNESS_RANK_EXTENTS 10U
#define WITNESS_RANK_EXTENT_FREELIST 11U
#define WITNESS_RANK_TCACHE_QL 10U
#define WITNESS_RANK_EXTENTS 11U
#define WITNESS_RANK_EXTENT_FREELIST 12U
#define WITNESS_RANK_RTREE_ELM 12U
#define WITNESS_RANK_RTREE 13U
#define WITNESS_RANK_BASE 14U
#define WITNESS_RANK_ARENA_LARGE 15U
#define WITNESS_RANK_RTREE_ELM 13U
#define WITNESS_RANK_RTREE 14U
#define WITNESS_RANK_BASE 15U
#define WITNESS_RANK_ARENA_LARGE 16U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF

View File

@@ -215,6 +215,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* tcache_bytes counts currently cached bytes. */
astats->tcache_bytes = 0;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
for (i = 0; i < nhbins; i++) {
tbin = &tcache->tbins[i];
@@ -222,6 +223,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
index2size(i);
}
}
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
for (i = 0; i < NBINS; i++) {
@@ -1650,6 +1652,10 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
if (config_stats && config_tcache) {
ql_new(&arena->tcache_ql);
if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
WITNESS_RANK_TCACHE_QL)) {
goto label_error;
}
}
if (config_prof) {
@@ -1736,6 +1742,9 @@ arena_boot(void) {
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->lock);
if (config_stats && config_tcache) {
malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
}
}
void
@@ -1773,6 +1782,9 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
extents_postfork_parent(tsdn, &arena->extents_cached);
extents_postfork_parent(tsdn, &arena->extents_retained);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
if (config_stats && config_tcache) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
}
void
@@ -1788,4 +1800,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
extents_postfork_child(tsdn, &arena->extents_cached);
extents_postfork_child(tsdn, &arena->extents_retained);
malloc_mutex_postfork_child(tsdn, &arena->lock);
if (config_stats && config_tcache) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}
}

View File

@@ -684,17 +684,12 @@ stats_print_atexit(void) {
if (arena != NULL) {
tcache_t *tcache;
/*
* tcache_stats_merge() locks bins, so if any
* code is introduced that acquires both arena
* and bin locks in the opposite order,
* deadlocks may result.
*/
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tsdn, tcache, arena);
}
malloc_mutex_unlock(tsdn, &arena->lock);
malloc_mutex_unlock(tsdn,
&arena->tcache_ql_mtx);
}
}
}

View File

@@ -261,10 +261,10 @@ static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(tsdn, &arena->lock);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
@@ -272,7 +272,7 @@ static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -286,7 +286,7 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
@@ -409,8 +409,6 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
cassert(config_stats);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];