Fix fork()-related lock rank ordering reversals.

This commit is contained in:
Jason Evans 2016-04-25 23:14:40 -07:00
parent 0d970a054e
commit 174c0c3a9c
11 changed files with 188 additions and 44 deletions

View File

@ -141,6 +141,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \

View File

@ -601,7 +601,10 @@ void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsd_t *tsd, unsigned ind);
bool arena_boot(void);
void arena_prefork(tsd_t *tsd, arena_t *arena);
void arena_prefork0(tsd_t *tsd, arena_t *arena);
void arena_prefork1(tsd_t *tsd, arena_t *arena);
void arena_prefork2(tsd_t *tsd, arena_t *arena);
void arena_prefork3(tsd_t *tsd, arena_t *arena);
void arena_postfork_parent(tsd_t *tsd, arena_t *arena);
void arena_postfork_child(tsd_t *tsd, arena_t *arena);

View File

@ -84,7 +84,10 @@ arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
@ -432,7 +435,8 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_prefork0
prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
@ -583,11 +587,14 @@ valgrind_make_mem_undefined
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_lockless_error
witness_not_owner_error
witness_owner_error
witness_postfork
witness_prefork
witness_unlock
witnesses_cleanup

View File

@ -316,7 +316,8 @@ bool prof_gdump_set(tsd_t *tsd, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork(tsd_t *tsd);
void prof_prefork0(tsd_t *tsd);
void prof_prefork1(tsd_t *tsd);
void prof_postfork_parent(tsd_t *tsd);
void prof_postfork_child(tsd_t *tsd);
void prof_sample_threshold_update(prof_tdata_t *tdata);

View File

@ -544,6 +544,7 @@ struct tsd_init_head_s {
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
O(witnesses, witness_list_t) \
O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@ -558,7 +559,8 @@ struct tsd_init_head_s {
false, \
tcache_enabled_default, \
NULL, \
ql_head_initializer(witnesses) \
ql_head_initializer(witnesses), \
false \
}
struct tsd_s {

View File

@ -94,6 +94,9 @@ extern witness_lockless_error_t *witness_lockless_error;
void witness_assert_lockless(tsd_t *tsd);
void witnesses_cleanup(tsd_t *tsd);
void witness_fork_cleanup(tsd_t *tsd);
void witness_prefork(tsd_t *tsd);
void witness_postfork(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/

View File

@ -3822,16 +3822,34 @@ arena_boot(void)
}
void
arena_prefork(tsd_t *tsd, arena_t *arena)
arena_prefork0(tsd_t *tsd, arena_t *arena)
{
malloc_mutex_prefork(tsd, &arena->lock);
}
void
arena_prefork1(tsd_t *tsd, arena_t *arena)
{
malloc_mutex_prefork(tsd, &arena->chunks_mtx);
}
void
arena_prefork2(tsd_t *tsd, arena_t *arena)
{
malloc_mutex_prefork(tsd, &arena->node_cache_mtx);
}
void
arena_prefork3(tsd_t *tsd, arena_t *arena)
{
unsigned i;
malloc_mutex_prefork(tsd, &arena->lock);
malloc_mutex_prefork(tsd, &arena->huge_mtx);
malloc_mutex_prefork(tsd, &arena->chunks_mtx);
malloc_mutex_prefork(tsd, &arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(tsd, &arena->bins[i].lock);
malloc_mutex_prefork(tsd, &arena->huge_mtx);
}
void
@ -3839,11 +3857,11 @@ arena_postfork_parent(tsd_t *tsd, arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_parent(tsd, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(tsd, &arena->bins[i].lock);
malloc_mutex_postfork_parent(tsd, &arena->node_cache_mtx);
malloc_mutex_postfork_parent(tsd, &arena->chunks_mtx);
malloc_mutex_postfork_parent(tsd, &arena->huge_mtx);
malloc_mutex_postfork_parent(tsd, &arena->lock);
}
@ -3852,10 +3870,10 @@ arena_postfork_child(tsd_t *tsd, arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_child(tsd, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(tsd, &arena->bins[i].lock);
malloc_mutex_postfork_child(tsd, &arena->node_cache_mtx);
malloc_mutex_postfork_child(tsd, &arena->chunks_mtx);
malloc_mutex_postfork_child(tsd, &arena->huge_mtx);
malloc_mutex_postfork_child(tsd, &arena->lock);
}

View File

@ -2757,7 +2757,8 @@ _malloc_prefork(void)
#endif
{
tsd_t *tsd;
unsigned i, narenas;
unsigned i, j, narenas;
arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@ -2767,18 +2768,32 @@ _malloc_prefork(void)
tsd = tsd_fetch();
/* Acquire all mutexes in a safe order. */
ctl_prefork(tsd);
prof_prefork(tsd);
malloc_mutex_prefork(tsd, &arenas_lock);
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
narenas = narenas_total_get();
if ((arena = arena_get(tsd, i, false)) != NULL)
arena_prefork(tsd, arena);
/* Acquire all mutexes in a safe order. */
witness_prefork(tsd);
ctl_prefork(tsd);
malloc_mutex_prefork(tsd, &arenas_lock);
prof_prefork0(tsd);
for (i = 0; i < 3; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd, j, false)) != NULL) {
switch (i) {
case 0: arena_prefork0(tsd, arena); break;
case 1: arena_prefork1(tsd, arena); break;
case 2: arena_prefork2(tsd, arena); break;
default: not_reached();
}
}
}
}
chunk_prefork(tsd);
base_prefork(tsd);
chunk_prefork(tsd);
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd, i, false)) != NULL)
arena_prefork3(tsd, arena);
}
prof_prefork1(tsd);
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@ -2801,17 +2816,18 @@ _malloc_postfork(void)
tsd = tsd_fetch();
/* Release all mutexes, now that fork() has completed. */
base_postfork_parent(tsd);
chunk_postfork_parent(tsd);
base_postfork_parent(tsd);
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(tsd, i, false)) != NULL)
arena_postfork_parent(tsd, arena);
}
malloc_mutex_postfork_parent(tsd, &arenas_lock);
prof_postfork_parent(tsd);
malloc_mutex_postfork_parent(tsd, &arenas_lock);
ctl_postfork_parent(tsd);
witness_postfork(tsd);
}
void
@ -2825,17 +2841,18 @@ jemalloc_postfork_child(void)
tsd = tsd_fetch();
/* Release all mutexes, now that fork() has completed. */
base_postfork_child(tsd);
chunk_postfork_child(tsd);
base_postfork_child(tsd);
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(tsd, i, false)) != NULL)
arena_postfork_child(tsd, arena);
}
malloc_mutex_postfork_child(tsd, &arenas_lock);
prof_postfork_child(tsd);
malloc_mutex_postfork_child(tsd, &arenas_lock);
ctl_postfork_child(tsd);
witness_postfork(tsd);
}
/******************************************************************************/

View File

@ -2257,20 +2257,32 @@ prof_boot2(tsd_t *tsd)
}
void
prof_prefork(tsd_t *tsd)
prof_prefork0(tsd_t *tsd)
{
if (opt_prof) {
unsigned i;
malloc_mutex_prefork(tsd, &tdatas_mtx);
malloc_mutex_prefork(tsd, &prof_dump_mtx);
malloc_mutex_prefork(tsd, &bt2gctx_mtx);
malloc_mutex_prefork(tsd, &next_thr_uid_mtx);
malloc_mutex_prefork(tsd, &prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(tsd, &gctx_locks[i]);
malloc_mutex_prefork(tsd, &tdatas_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_prefork(tsd, &tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(tsd, &gctx_locks[i]);
}
}
void
prof_prefork1(tsd_t *tsd)
{
if (opt_prof) {
malloc_mutex_prefork(tsd, &prof_active_mtx);
malloc_mutex_prefork(tsd, &prof_dump_seq_mtx);
malloc_mutex_prefork(tsd, &prof_gdump_mtx);
malloc_mutex_prefork(tsd, &next_thr_uid_mtx);
malloc_mutex_prefork(tsd, &prof_thread_active_init_mtx);
}
}
@ -2281,14 +2293,18 @@ prof_postfork_parent(tsd_t *tsd)
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(tsd, &tdata_locks[i]);
malloc_mutex_postfork_parent(tsd, &prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsd, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsd, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsd, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsd, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(tsd, &gctx_locks[i]);
malloc_mutex_postfork_parent(tsd, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsd, &next_thr_uid_mtx);
malloc_mutex_postfork_parent(tsd, &bt2gctx_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(tsd, &tdata_locks[i]);
malloc_mutex_postfork_parent(tsd, &tdatas_mtx);
malloc_mutex_postfork_parent(tsd, &bt2gctx_mtx);
malloc_mutex_postfork_parent(tsd, &prof_dump_mtx);
}
}
@ -2299,14 +2315,18 @@ prof_postfork_child(tsd_t *tsd)
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(tsd, &tdata_locks[i]);
malloc_mutex_postfork_child(tsd, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsd, &next_thr_uid_mtx);
malloc_mutex_postfork_child(tsd, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsd, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsd, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(tsd, &gctx_locks[i]);
malloc_mutex_postfork_child(tsd, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsd, &next_thr_uid_mtx);
malloc_mutex_postfork_child(tsd, &bt2gctx_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(tsd, &tdata_locks[i]);
malloc_mutex_postfork_child(tsd, &tdatas_mtx);
malloc_mutex_postfork_child(tsd, &bt2gctx_mtx);
malloc_mutex_postfork_child(tsd, &prof_dump_mtx);
}
}

View File

@ -48,9 +48,21 @@ witness_lock(tsd_t *tsd, witness_t *witness)
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL && w->rank >= witness->rank && (w->comp == NULL ||
w->comp != witness->comp || w->comp(w, witness) > 0))
if (w == NULL) {
/* No other locks; do nothing. */
} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, witness) > 0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
@ -194,3 +206,24 @@ witnesses_cleanup(tsd_t *tsd)
/* Do nothing. */
}
/*
 * tsd cleanup hook for the per-thread witness_fork flag.  A plain bool
 * owns no resources, so there is nothing to release.  NOTE(review):
 * presumably wired up via the O(witness_fork, bool) tsd slot added in
 * tsd.h -- confirm against the tsd cleanup machinery.
 */
void
witness_fork_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
/*
 * Mark this thread as being inside the fork() prefork sequence.  While
 * the flag is set, witness_lock() relaxes its strict rank-ordering check
 * (the "Forking, and relaxed ranking satisfied" path accepts any
 * non-decreasing rank, including equal ranks without a comparator),
 * which _malloc_prefork() needs in order to acquire every mutex,
 * including multiple same-rank ones, before fork(2).
 */
void
witness_prefork(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, true);
}
/*
 * Clear the per-thread fork flag once fork(2) has completed, restoring
 * strict witness rank checking.  Called from both the parent and child
 * postfork paths after all mutexes have been released.
 */
void
witness_postfork(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, false);
}

39
test/unit/fork.c Normal file
View File

@ -0,0 +1,39 @@
#include "test/jemalloc_test.h"

#include <sys/wait.h>
#include <unistd.h>
/*
 * Smoke test that fork()ing while jemalloc is active neither deadlocks
 * nor corrupts allocator state: the prefork/postfork handlers must
 * acquire and release every mutex in a sanctioned order, and the parent
 * must still be able to deallocate afterwards.
 */
TEST_BEGIN(test_fork)
{
	void *p;
	pid_t pid;

	p = malloc(1);
	assert_ptr_not_null(p, "Unexpected malloc() failure");

	pid = fork();
	if (pid == -1) {
		/* Error. */
		test_fail("Unexpected fork() failure");
	} else if (pid == 0) {
		/*
		 * Child.  Use _exit(2) rather than exit(3) so that stdio
		 * buffers duplicated from the parent are not flushed a
		 * second time and atexit(3) handlers do not run in the
		 * child.
		 */
		_exit(0);
	} else {
		int status;

		/* Parent. */
		free(p);
		do {
			if (waitpid(pid, &status, 0) == -1) {
				/*
				 * Bail out: status is indeterminate after a
				 * waitpid() failure, so the loop condition
				 * below must not inspect it.
				 */
				test_fail("Unexpected waitpid() failure");
				break;
			}
		} while (!WIFEXITED(status) && !WIFSIGNALED(status));
	}
}
TEST_END
/* Test harness entry point: run the fork test via the test() driver. */
int
main(void)
{
return (test(
test_fork));
}