Fix a prof-related locking order bug.

Fix a locking order bug that could cause deadlock during fork if heap
profiling were enabled.

commit 88c222c8e9
parent 06912756cc
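In outline: arena_prof_accum_impl() previously called prof_idump() while the
caller held arena->lock, so the prof mutexes could be acquired with an arena
mutex held, an order the fork path cannot safely follow when it acquires all
mutexes. After this commit the accumulator functions only report that the
profile interval was crossed, and callers dump after dropping the arena lock.
A condensed sketch of the before/after pattern (paraphrased from the hunks
below, not verbatim jemalloc code):

	/* Before: the dump could run with arena->lock held. */
	malloc_mutex_lock(&arena->lock);
	arena_prof_accum_impl(arena, accumbytes);	/* might call prof_idump() */
	malloc_mutex_unlock(&arena->lock);

	/*
	 * After: the accumulator returns whether a dump is due; the caller
	 * dumps only once the arena lock has been dropped.
	 */
	bool idump;

	malloc_mutex_lock(&arena->lock);
	idump = arena_prof_accum_impl(arena, accumbytes);
	malloc_mutex_unlock(&arena->lock);
	if (idump)
		prof_idump();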
ChangeLog
@@ -9,6 +9,8 @@ found in the git revision history:
 * 3.x.x (XXX Not yet released)
 
   Bug fixes:
+  - Fix a locking order bug that could cause deadlock during fork if heap
+    profiling were enabled.
   - Fix a chunk recycling bug that could cause the allocator to lose track of
     whether a chunk was zeroed.  On FreeBSD, NetBSD, and OS X, it could cause
     corruption if allocating via sbrk(2) (unlikely unless running with the
include/jemalloc/internal/arena.h
@@ -463,9 +463,9 @@ void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
     size_t runind, size_t binind, size_t flags);
 void	arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
     size_t unzeroed);
-void	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
-void	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
+bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
+bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
 size_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
@@ -663,7 +663,7 @@ arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
 	*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 {
 
@@ -672,33 +672,40 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 
 	arena->prof_accumbytes += accumbytes;
 	if (arena->prof_accumbytes >= prof_interval) {
-		prof_idump();
 		arena->prof_accumbytes -= prof_interval;
+		return (true);
 	}
+	return (false);
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 {
 
 	cassert(config_prof);
 
 	if (prof_interval == 0)
-		return;
-	arena_prof_accum_impl(arena, accumbytes);
+		return (false);
+	return (arena_prof_accum_impl(arena, accumbytes));
 }
 
-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 {
 
 	cassert(config_prof);
 
 	if (prof_interval == 0)
-		return;
-	malloc_mutex_lock(&arena->lock);
-	arena_prof_accum_impl(arena, accumbytes);
-	malloc_mutex_unlock(&arena->lock);
+		return (false);
+
+	{
+		bool ret;
+
+		malloc_mutex_lock(&arena->lock);
+		ret = arena_prof_accum_impl(arena, accumbytes);
+		malloc_mutex_unlock(&arena->lock);
+		return (ret);
+	}
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
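The caller-side idiom, applied repeatedly in src/arena.c and src/tcache.c
below, folds the returned flag into the condition that guards prof_idump().
A condensed sketch of the two variants (names as in the hunks that follow):

	/*
	 * Unlocked callers: arena_prof_accum() takes and releases
	 * arena->lock internally, so the dump runs with no arena mutex held.
	 */
	if (config_prof && arena_prof_accum(arena, accumbytes))
		prof_idump();

	/*
	 * Callers already holding arena->lock latch the flag, unlock, then
	 * dump.  idump is marked UNUSED because it is dead code when
	 * profiling is compiled out.
	 */
	UNUSED bool idump;

	if (config_prof)
		idump = false;
	malloc_mutex_lock(&arena->lock);
	if (config_prof)
		idump = arena_prof_accum_locked(arena, accumbytes);
	malloc_mutex_unlock(&arena->lock);
	if (config_prof && idump)
		prof_idump();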
src/arena.c
@@ -1338,8 +1338,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
 
 	assert(tbin->ncached == 0);
 
-	if (config_prof)
-		arena_prof_accum(arena, prof_accumbytes);
+	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+		prof_idump();
 	bin = &arena->bins[binind];
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1447,8 +1447,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		bin->stats.nrequests++;
 	}
 	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && isthreaded == false)
-		arena_prof_accum(arena, size);
+	if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+		prof_idump();
 
 	if (zero == false) {
 		if (config_fill) {
@@ -1475,6 +1475,7 @@ void *
 arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
+	UNUSED bool idump;
 
 	/* Large allocation. */
 	size = PAGE_CEILING(size);
@@ -1493,8 +1494,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
 	}
 	if (config_prof)
-		arena_prof_accum_locked(arena, size);
+		idump = arena_prof_accum_locked(arena, size);
 	malloc_mutex_unlock(&arena->lock);
+	if (config_prof && idump)
+		prof_idump();
 
 	if (zero == false) {
 		if (config_fill) {
src/jemalloc.c
@@ -1753,12 +1753,12 @@ _malloc_prefork(void)
 
 	/* Acquire all mutexes in a safe order. */
 	ctl_prefork();
+	prof_prefork();
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
-	prof_prefork();
 	chunk_prefork();
 	base_prefork();
 	huge_prefork();
@@ -1784,12 +1784,12 @@ _malloc_postfork(void)
 	huge_postfork_parent();
 	base_postfork_parent();
 	chunk_postfork_parent();
-	prof_postfork_parent();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	prof_postfork_parent();
 	ctl_postfork_parent();
 }
 
@@ -1804,12 +1804,12 @@ jemalloc_postfork_child(void)
 	huge_postfork_child();
 	base_postfork_child();
 	chunk_postfork_child();
-	prof_postfork_child();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	prof_postfork_child();
 	ctl_postfork_child();
 }
 
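For background on why the prefork/postfork order matters: this is the classic
lock-order inversion, where two paths take the same pair of locks in opposite
orders. A self-contained illustration in plain pthreads (not jemalloc code;
the lock names are stand-ins for the arena and prof mutexes):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	/* One path takes A then B... */
	static void *
	path_one(void *arg)
	{

		pthread_mutex_lock(&lock_a);
		pthread_mutex_lock(&lock_b);
		pthread_mutex_unlock(&lock_b);
		pthread_mutex_unlock(&lock_a);
		return (NULL);
	}

	/*
	 * ...while the other takes B then A.  If each thread grabs its first
	 * lock before the other releases, both block forever.
	 */
	static void *
	path_two(void *arg)
	{

		pthread_mutex_lock(&lock_b);
		pthread_mutex_lock(&lock_a);
		pthread_mutex_unlock(&lock_a);
		pthread_mutex_unlock(&lock_b);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, path_one, NULL);
		pthread_create(&t2, NULL, path_two, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		printf("lucky interleaving; no deadlock this run\n");
		return (0);
	}

Moving prof_prefork() ahead of the arena mutexes (and the postfork releases
after them) keeps the fork path on the same single acquisition order that the
allocation paths now observe.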
src/tcache.c
@@ -97,7 +97,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_bin_t *bin = &arena->bins[binind];
 
 		if (config_prof && arena == tcache->arena) {
-			arena_prof_accum(arena, tcache->prof_accumbytes);
+			if (arena_prof_accum(arena, tcache->prof_accumbytes))
+				prof_idump();
 			tcache->prof_accumbytes = 0;
 		}
 
@@ -174,11 +175,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
 		arena_t *arena = chunk->arena;
+		UNUSED bool idump;
+
+		if (config_prof)
+			idump = false;
 		malloc_mutex_lock(&arena->lock);
 		if ((config_prof || config_stats) && arena == tcache->arena) {
 			if (config_prof) {
-				arena_prof_accum_locked(arena,
+				idump = arena_prof_accum_locked(arena,
 				    tcache->prof_accumbytes);
 				tcache->prof_accumbytes = 0;
 			}
 			if (config_stats) {
@@ -210,6 +214,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 			}
 		}
 		malloc_mutex_unlock(&arena->lock);
+		if (config_prof && idump)
+			prof_idump();
 	}
 	if (config_stats && merged_stats == false) {
 		/*
@@ -341,8 +347,9 @@ tcache_destroy(tcache_t *tcache)
 		}
 	}
 
-	if (config_prof && tcache->prof_accumbytes > 0)
-		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
+	if (config_prof && tcache->prof_accumbytes > 0 &&
+	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+		prof_idump();
 
 	tcache_size = arena_salloc(tcache, false);
 	if (tcache_size <= SMALL_MAXCLASS) {