Fix a prof-related locking order bug.

Fix a locking order bug that could cause deadlock during fork if heap
profiling were enabled.
Jason Evans
2013-02-06 11:59:30 -08:00
parent 06912756cc
commit 88c222c8e9
5 changed files with 44 additions and 25 deletions
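Every hunk below follows the same shape: arena_prof_accum() / arena_prof_accum_locked() now report via their return value that an interval dump has become due, and the caller runs prof_idump() only after the arena or bin mutex has been released, rather than dumping from inside the accumulation path with the lock still held. A minimal stand-alone sketch of that shape (stand-in names such as prof_accum_locked, prof_idump_stub, and alloc_path, plus a made-up interval threshold; not jemalloc's actual prof code):

/*
 * Sketch only: decide under the arena lock whether a dump is due, but
 * perform the dump (which may itself take locks and allocate) only
 * after unlocking.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t accumbytes;                    /* protected by arena_lock */
static const uint64_t prof_interval = 1 << 20; /* hypothetical threshold */

/* Caller holds arena_lock; returns true if an interval dump is now due. */
static bool
prof_accum_locked(uint64_t bytes)
{
	accumbytes += bytes;
	if (accumbytes >= prof_interval) {
		accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

/* Stand-in for prof_idump(): free to take prof locks and allocate. */
static void
prof_idump_stub(void)
{
	printf("interval dump\n");
}

static void
alloc_path(uint64_t size)
{
	bool idump;

	pthread_mutex_lock(&arena_lock);
	/* ... allocation bookkeeping under the arena lock ... */
	idump = prof_accum_locked(size);
	pthread_mutex_unlock(&arena_lock);
	if (idump)
		prof_idump_stub();	/* arena_lock is no longer held */
}

int
main(void)
{
	alloc_path((uint64_t)1 << 21);
	return (0);
}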

src/arena.c

@@ -1338,8 +1338,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
 	assert(tbin->ncached == 0);
-	if (config_prof)
-		arena_prof_accum(arena, prof_accumbytes);
+	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+		prof_idump();
 	bin = &arena->bins[binind];
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1447,8 +1447,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		bin->stats.nrequests++;
 	}
 	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && isthreaded == false)
-		arena_prof_accum(arena, size);
+	if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+		prof_idump();
 	if (zero == false) {
 		if (config_fill) {
@@ -1475,6 +1475,7 @@ void *
 arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
+	UNUSED bool idump;
 	/* Large allocation. */
 	size = PAGE_CEILING(size);
@@ -1493,8 +1494,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
 	}
 	if (config_prof)
-		arena_prof_accum_locked(arena, size);
+		idump = arena_prof_accum_locked(arena, size);
 	malloc_mutex_unlock(&arena->lock);
+	if (config_prof && idump)
+		prof_idump();
 	if (zero == false) {
 		if (config_fill) {

src/jemalloc.c

@@ -1753,12 +1753,12 @@ _malloc_prefork(void)
 	/* Acquire all mutexes in a safe order. */
 	ctl_prefork();
+	prof_prefork();
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
-	prof_prefork();
 	chunk_prefork();
 	base_prefork();
 	huge_prefork();
@@ -1784,12 +1784,12 @@ _malloc_postfork(void)
 	huge_postfork_parent();
 	base_postfork_parent();
 	chunk_postfork_parent();
-	prof_postfork_parent();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	prof_postfork_parent();
 	ctl_postfork_parent();
 }
@@ -1804,12 +1804,12 @@ jemalloc_postfork_child(void)
 	huge_postfork_child();
 	base_postfork_child();
 	chunk_postfork_child();
-	prof_postfork_child();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	prof_postfork_child();
 	ctl_postfork_child();
 }
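Taken together with the arena.c and tcache.c changes, these hunks settle on a single order between the profiling and arena mutexes: prof_prefork() now runs before arenas_lock and the per-arena locks are acquired, and the two postfork paths release them in the mirrored order. That matches the allocation paths, which after this commit only call prof_idump() once the arena or bin lock has been dropped; the earlier arrangement, where the fork handlers and the dump path could take the two lock classes in opposite orders, appears to be the inversion the commit message refers to.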

src/tcache.c

@@ -97,7 +97,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_bin_t *bin = &arena->bins[binind];
 		if (config_prof && arena == tcache->arena) {
-			arena_prof_accum(arena, tcache->prof_accumbytes);
+			if (arena_prof_accum(arena, tcache->prof_accumbytes))
+				prof_idump();
 			tcache->prof_accumbytes = 0;
 		}
@@ -174,11 +175,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
 		arena_t *arena = chunk->arena;
+		UNUSED bool idump;
+		if (config_prof)
+			idump = false;
 		malloc_mutex_lock(&arena->lock);
 		if ((config_prof || config_stats) && arena == tcache->arena) {
 			if (config_prof) {
-				arena_prof_accum_locked(arena,
+				idump = arena_prof_accum_locked(arena,
 				    tcache->prof_accumbytes);
 				tcache->prof_accumbytes = 0;
 			}
@@ -210,6 +214,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 			}
 		}
 		malloc_mutex_unlock(&arena->lock);
+		if (config_prof && idump)
+			prof_idump();
 	}
 	if (config_stats && merged_stats == false) {
 		/*
@@ -341,8 +347,9 @@ tcache_destroy(tcache_t *tcache)
 		}
 	}
-	if (config_prof && tcache->prof_accumbytes > 0)
-		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
+	if (config_prof && tcache->prof_accumbytes > 0 &&
+	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+		prof_idump();
 	tcache_size = arena_salloc(tcache, false);
 	if (tcache_size <= SMALL_MAXCLASS) {
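A side note on the tcache_bin_flush_large hunk above: idump is declared UNUSED and explicitly set to false only under config_prof. When profiling is compiled out, the later config_prof && idump test is dead code and the attribute presumably exists to silence the resulting unused-variable warning; when profiling is compiled in, the explicit initialization covers iterations where the (config_prof || config_stats) && arena == tcache->arena branch is not taken, so idump is never read uninitialized.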