Fix fork(2)-related deadlocks.

Add a library constructor for jemalloc that initializes the allocator.
This fixes a race that could occur if threads were created by the main
thread prior to any memory allocation, followed by fork(2), and then
memory allocation in the child process.
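
For concreteness, a minimal sketch of the racy call sequence described above (illustrative only, not part of the commit; the helper names are arbitrary):

/* Sketch: thread created before any allocation, then fork(), then malloc. */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static void *
worker(void *arg)
{

	(void)arg;
	/*
	 * Possibly the first allocation in the process, so allocator
	 * initialization here can race with the fork() below.
	 */
	free(malloc(1));
	return (NULL);
}

int
main(void)
{
	pthread_t thd;

	/* The main thread has not allocated, so jemalloc is uninitialized. */
	pthread_create(&thd, NULL, worker, NULL);
	if (fork() == 0) {
		free(malloc(1));	/* Could deadlock before this fix. */
		_exit(0);
	}
	wait(NULL);
	pthread_join(thd, NULL);
	return (0);
}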

Fix the prefork/postfork functions to acquire/release the ctl, prof, and
rtree mutexes.  This fixes various fork() child process deadlocks, but
one possible deadlock remains (intentionally) unaddressed: prof
backtracing can acquire runtime library mutexes, so deadlock is still
possible if heap profiling is enabled during fork().  This deadlock is
known to be a real issue in at least the case of libgcc-based
backtracing.
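
For background, the prefork/postfork hooks follow the standard pthread_atfork(3) discipline, sketched below with a single mutex (a simplified illustration; jemalloc's real handlers, shown in the diff, walk every allocator mutex in a fixed order, and register_fork_handlers() is a hypothetical stand-in for the registration jemalloc performs during initialization):

/* Sketch of the pthread_atfork(3) pattern the prefork/postfork hooks use. */
#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{

	/* Hold the lock so fork() snapshots it in a consistent state. */
	pthread_mutex_lock(&mtx);
}

static void
postfork_parent(void)
{

	pthread_mutex_unlock(&mtx);
}

static void
postfork_child(void)
{

	/*
	 * The child releases the lock it inherited; jemalloc's
	 * malloc_mutex_postfork_child() may instead reinitialize the mutex,
	 * depending on the platform.
	 */
	pthread_mutex_unlock(&mtx);
}

static void
register_fork_handlers(void)
{

	pthread_atfork(prefork, postfork_parent, postfork_child);
}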

Reported by tfengjun.
Author: Jason Evans
Date:   2012-10-09 14:46:22 -07:00
Parent: 7de92767c2
Commit: 20f1fc95ad

10 changed files with 175 additions and 3 deletions

src/chunk.c

@@ -318,3 +318,33 @@ chunk_boot(void)
 	return (false);
 }
+
+void
+chunk_prefork(void)
+{
+
+	malloc_mutex_lock(&chunks_mtx);
+	if (config_ivsalloc)
+		rtree_prefork(chunks_rtree);
+	chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+	chunk_dss_postfork_parent();
+	if (config_ivsalloc)
+		rtree_postfork_parent(chunks_rtree);
+	malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+	chunk_dss_postfork_child();
+	if (config_ivsalloc)
+		rtree_postfork_child(chunks_rtree);
+	malloc_mutex_postfork_child(&chunks_mtx);
+}

src/ctl.c

@@ -827,6 +827,27 @@ ctl_boot(void)
 	return (false);
 }
+
+void
+ctl_prefork(void)
+{
+
+	malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&ctl_mtx);
+}
 
 /******************************************************************************/
 /* *_ctl() functions. */

src/jemalloc.c

@@ -1614,6 +1614,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
  * malloc during fork().
  */
+
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator.  Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time.  The following library
+ * constructor is a partial solution to this problem.  It may still be
+ * possible to trigger the deadlock described above, but doing so would
+ * involve forking via a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+	malloc_init();
+}
 
 #ifndef JEMALLOC_MUTEX_INIT_CB
 void
 jemalloc_prefork(void)
@@ -1631,14 +1652,16 @@ _malloc_prefork(void)
 	assert(malloc_initialized);
 
 	/* Acquire all mutexes in a safe order. */
+	ctl_prefork();
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
+	prof_prefork();
 	base_prefork();
 	huge_prefork();
-	chunk_dss_prefork();
+	chunk_prefork();
 }
 
 #ifndef JEMALLOC_MUTEX_INIT_CB
@@ -1658,14 +1681,16 @@ _malloc_postfork(void)
 	assert(malloc_initialized);
 
 	/* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_parent();
+	chunk_postfork_parent();
 	huge_postfork_parent();
 	base_postfork_parent();
+	prof_postfork_parent();
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	ctl_postfork_parent();
 }
 
 void
@@ -1676,14 +1701,16 @@ jemalloc_postfork_child(void)
 	assert(malloc_initialized);
 
 	/* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_child();
+	chunk_postfork_child();
 	huge_postfork_child();
 	base_postfork_child();
+	prof_postfork_child();
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	ctl_postfork_child();
 }
 
 /******************************************************************************/

src/prof.c

@@ -1270,4 +1270,46 @@ prof_boot2(void)
 	return (false);
 }
+
+void
+prof_prefork(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		malloc_mutex_lock(&bt2ctx_mtx);
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_lock(&ctx_locks[i]);
+	}
+}
+
+void
+prof_postfork_parent(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_postfork_parent(&ctx_locks[i]);
+		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+		malloc_mutex_postfork_parent(&bt2ctx_mtx);
+	}
+}
+
+void
+prof_postfork_child(void)
+{
+
+	if (opt_prof) {
+		unsigned i;
+
+		for (i = 0; i < PROF_NCTX_LOCKS; i++)
+			malloc_mutex_postfork_child(&ctx_locks[i]);
+		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+		malloc_mutex_postfork_child(&bt2ctx_mtx);
+	}
+}
 
 /******************************************************************************/

src/rtree.c

@@ -44,3 +44,24 @@ rtree_new(unsigned bits)
 	return (ret);
 }
+
+void
+rtree_prefork(rtree_t *rtree)
+{
+
+	malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+	malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+	malloc_mutex_postfork_child(&rtree->mutex);
+}