Fix arena_prefork lock rank order for witness.

When witness is enabled, lock rank order needs to be preserved during
prefork, not only within each arena, but also across arenas. This change
breaks arena_prefork into further stages to ensure a valid rank order
across arenas. It also changes test/unit/fork to use a manually managed
arena in order to catch this case.
This commit is contained in:
Author: Qi Wang, 2017-03-08 13:00:42 -08:00 (committed by Qi Wang)
parent 8adab26972
commit 8721e19c04
5 changed files with 49 additions and 12 deletions

View File

@ -82,6 +82,9 @@ void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

View File

@ -51,6 +51,9 @@ arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prefork4
arena_prefork5
arena_prefork6
arena_prof_accum
arena_prof_promote
arena_prof_tctx_get

View File

@ -1743,29 +1743,39 @@ arena_boot(void) {
/*
 * Prefork stage 0: acquire the arena's decay mutex ahead of all other
 * arena locks.  Per the commit message, stages exist so that witness
 * lock-rank order is preserved across arenas during fork(); this stage
 * runs for every arena before any later stage runs for any arena.
 */
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->decay.mtx);
}
/*
 * Prefork stage 1: acquire the tcache_ql_mtx (presumably the tcache
 * queue-list mutex -- confirm against arena_t).  Only taken when both
 * stats and tcache support are compiled in; otherwise this is a no-op.
 */
void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
if (config_stats && config_tcache) {
malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
}
}
void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
extents_prefork(tsdn, &arena->extents_cached);
extents_prefork(tsdn, &arena->extents_retained);
}
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extent_freelist_mtx);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
base_prefork(tsdn, arena->base);
}
void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->large_mtx);
for (i = 0; i < NBINS; i++) {
}
void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
for (unsigned i = 0; i < NBINS; i++) {
malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
}
}

View File

@ -2773,7 +2773,8 @@ _malloc_prefork(void)
tcache_prefork(tsd_tsdn(tsd));
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
prof_prefork0(tsd_tsdn(tsd));
for (i = 0; i < 3; i++) {
/* Break arena prefork into stages to preserve lock order. */
for (i = 0; i < 7; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
NULL) {
@ -2787,16 +2788,23 @@ _malloc_prefork(void)
case 2:
arena_prefork2(tsd_tsdn(tsd), arena);
break;
case 3:
arena_prefork3(tsd_tsdn(tsd), arena);
break;
case 4:
arena_prefork4(tsd_tsdn(tsd), arena);
break;
case 5:
arena_prefork5(tsd_tsdn(tsd), arena);
break;
case 6:
arena_prefork6(tsd_tsdn(tsd), arena);
break;
default: not_reached();
}
}
}
}
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_prefork3(tsd_tsdn(tsd), arena);
}
}
prof_prefork1(tsd_tsdn(tsd));
}

View File

@ -9,6 +9,19 @@ TEST_BEGIN(test_fork) {
void *p;
pid_t pid;
/* Set up a manually managed arena for test. */
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
/* Migrate to the new arena. */
unsigned old_arena_ind;
sz = sizeof(old_arena_ind);
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");