Fix fork-related bugs.

Acquire/release arena bin locks as part of the prefork/postfork handling;
previously the bin locks were skipped, so a bin lock held by another thread at
fork time stayed locked in the child, making deadlock between fork and exec a
possibility.
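
As a rough illustration of the deadlock (a hypothetical standalone program,
not jemalloc code): if some thread happens to hold a bin lock at the instant
another thread calls fork(), the child inherits a copy of that lock in the
locked state with no thread left to release it, so the next lock attempt in
the child can block forever.

/* Hypothetical repro sketch; names are made up.  Compile with -pthread. */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
hammer(void *arg)	/* stands in for a thread that is inside malloc() */
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&bin_lock);
		pthread_mutex_unlock(&bin_lock);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t thd;

	pthread_create(&thd, NULL, hammer, NULL);
	if (fork() == 0) {
		/* Child: bin_lock may have been copied in the locked state. */
		pthread_mutex_lock(&bin_lock);	/* may never return */
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}
	return (0);
}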

Split jemalloc_postfork() into jemalloc_postfork_{parent,child}() so that the
child can reinitialize mutexes rather than unlocking them; a mutex inherited
across fork() carries internal state from the parent's threads, so unlocking
it in the child is not reliably portable.  In practice, this bug tended not to
cause problems.
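
For reference, the overall pattern looks like the following minimal sketch
with a single hypothetical mutex (not the jemalloc code itself): the prefork
handler quiesces state by taking the lock, the parent handler releases it
after fork() returns, and the child handler reinitializes it, mirroring what
malloc_mutex_postfork_child() does below.

/* Minimal pthread_atfork() sketch; names are hypothetical. */
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	pthread_mutex_lock(&big_lock);		/* quiesce before fork() */
}

static void
postfork_parent(void)
{
	pthread_mutex_unlock(&big_lock);	/* the forking thread still owns it */
}

static void
postfork_child(void)
{
	/* Rebuild instead of unlocking; inherited lock internals are suspect. */
	pthread_mutex_init(&big_lock, NULL);
}

int
main(void)
{
	/* Register once, early, before any thread might call fork(). */
	return (pthread_atfork(prefork, postfork_parent, postfork_child));
}
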
Jason Evans
2012-03-13 16:31:41 -07:00
parent 824d34e5b7
commit 4e2e3dd9cf
13 changed files with 194 additions and 38 deletions

src/arena.c

@@ -2169,3 +2169,33 @@ arena_boot(void)
	bin_info_init();
}
+
+void
+arena_prefork(arena_t *arena)
+{
+	unsigned i;
+
+	malloc_mutex_prefork(&arena->lock);
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_prefork(&arena->bins[i].lock);
+}
+
+void
+arena_postfork_parent(arena_t *arena)
+{
+	unsigned i;
+
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_postfork_parent(&arena->bins[i].lock);
+	malloc_mutex_postfork_parent(&arena->lock);
+}
+
+void
+arena_postfork_child(arena_t *arena)
+{
+	unsigned i;
+
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_postfork_child(&arena->bins[i].lock);
+	malloc_mutex_postfork_child(&arena->lock);
+}

src/base.c

@@ -4,7 +4,7 @@
/******************************************************************************/
/* Data. */
-malloc_mutex_t	base_mtx;
+static malloc_mutex_t	base_mtx;
/*
* Current pages that are being used for internal memory allocations. These
@@ -104,3 +104,24 @@ base_boot(void)
	return (false);
}
+
+void
+base_prefork(void)
+{
+
+	malloc_mutex_prefork(&base_mtx);
+}
+
+void
+base_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&base_mtx);
+}
+
+void
+base_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&base_mtx);
+}

src/chunk_dss.c

@@ -3,14 +3,18 @@
/******************************************************************************/
/* Data. */
-malloc_mutex_t	dss_mtx;
+/*
+ * Protects sbrk() calls.  This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
-static void	*dss_base;
+static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
-static void	*dss_prev;
+static void		*dss_prev;
/* Current upper limit on DSS addresses. */
-static void	*dss_max;
+static void		*dss_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
@@ -291,4 +295,28 @@ chunk_dss_boot(void)
	return (false);
}
+
+void
+chunk_dss_prefork(void)
+{
+
+	if (config_dss)
+		malloc_mutex_prefork(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_parent(void)
+{
+
+	if (config_dss)
+		malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+	if (config_dss)
+		malloc_mutex_postfork_child(&dss_mtx);
+}
/******************************************************************************/

src/huge.c

@@ -359,3 +359,24 @@ huge_boot(void)
	return (false);
}
+
+void
+huge_prefork(void)
+{
+
+	malloc_mutex_prefork(&huge_mtx);
+}
+
+void
+huge_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&huge_mtx);
+}
+
+void
+huge_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&huge_mtx);
+}

src/jemalloc.c

@@ -610,8 +610,8 @@ malloc_init_hard(void)
	malloc_conf_init();

	/* Register fork handlers. */
-	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
-	    jemalloc_postfork) != 0) {
+	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
@@ -1593,40 +1593,46 @@ jemalloc_prefork(void)
	unsigned i;

	/* Acquire all mutexes in a safe order. */
-	malloc_mutex_lock(&arenas_lock);
+	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
-			malloc_mutex_lock(&arenas[i]->lock);
+			arena_prefork(arenas[i]);
	}
-	malloc_mutex_lock(&base_mtx);
-	malloc_mutex_lock(&huge_mtx);
-	if (config_dss)
-		malloc_mutex_lock(&dss_mtx);
+	base_prefork();
+	huge_prefork();
+	chunk_dss_prefork();
}

void
-jemalloc_postfork(void)
+jemalloc_postfork_parent(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
-	if (config_dss)
-		malloc_mutex_unlock(&dss_mtx);
-	malloc_mutex_unlock(&huge_mtx);
-	malloc_mutex_unlock(&base_mtx);
+	chunk_dss_postfork_parent();
+	huge_postfork_parent();
+	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
-			malloc_mutex_unlock(&arenas[i]->lock);
+			arena_postfork_parent(arenas[i]);
	}
-	malloc_mutex_unlock(&arenas_lock);
+	malloc_mutex_postfork_parent(&arenas_lock);
}
+
+void
+jemalloc_postfork_child(void)
+{
+	unsigned i;
+
+	/* Release all mutexes, now that fork() has completed. */
+	chunk_dss_postfork_child();
+	huge_postfork_child();
+	base_postfork_child();
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			arena_postfork_child(arenas[i]);
+	}
+	malloc_mutex_postfork_child(&arenas_lock);
+}
/******************************************************************************/

src/mutex.c

@@ -92,3 +92,29 @@ malloc_mutex_destroy(malloc_mutex_t *mutex)
	}
#endif
}
+
+void
+malloc_mutex_prefork(malloc_mutex_t *mutex)
+{
+
+	malloc_mutex_lock(mutex);
+}
+
+void
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+{
+
+	malloc_mutex_unlock(mutex);
+}
+
+void
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+{
+
+	if (malloc_mutex_init(mutex)) {
+		malloc_printf("<jemalloc>: Error re-initializing mutex in "
+		    "child\n");
+		if (opt_abort)
+			abort();
+	}
+}