Fix lock order reversal during gdump.

Jason Evans 2017-01-20 17:36:56 -08:00
parent dad74bd3c8
commit b49c649bc1
4 changed files with 61 additions and 30 deletions
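Before this change, chunk_register() called prof_gdump() itself, while its callers could still be holding allocator mutexes (arena->lock, arena->huge_mtx); prof_gdump() acquires profiling locks of its own, so the two lock orders could invert and deadlock. The fix has chunk_register() merely report, through a new bool *gdump out-parameter, that a dump threshold was crossed; each caller then invokes prof_gdump() only after releasing its locks, and witness_assert_lock_depth() assertions document the requirement. A minimal standalone sketch of the pattern, using hypothetical names (registry_lock, dump_lock, do_dump) rather than jemalloc's:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t cur_chunks, high_chunks;

/* Stands in for prof_gdump(): takes dump_lock internally. */
static void
do_dump(void)
{
	pthread_mutex_lock(&dump_lock);
	/* ... write a heap profile ... */
	pthread_mutex_unlock(&dump_lock);
}

/* Before: dump_lock is acquired while registry_lock is held. */
static void
register_chunk_unsafe(void)
{
	pthread_mutex_lock(&registry_lock);
	cur_chunks++;
	if (cur_chunks > high_chunks) {
		high_chunks = cur_chunks;
		do_dump();	/* lock order reversal */
	}
	pthread_mutex_unlock(&registry_lock);
}

/* After: record the decision under the lock, act on it afterward. */
static void
register_chunk(void)
{
	bool gdump;

	pthread_mutex_lock(&registry_lock);
	cur_chunks++;
	gdump = (cur_chunks > high_chunks);
	if (gdump)
		high_chunks = cur_chunks;
	pthread_mutex_unlock(&registry_lock);

	if (gdump)
		do_dump();	/* no other locks held */
}

jemalloc additionally gates the dump on config_prof && opt_prof and the prof_gdump_get_unlocked() setting, as the hunks below show.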

include/jemalloc/internal/chunk.h

@@ -52,8 +52,8 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
-    const extent_node_t *node);
+bool chunk_register(const void *chunk, const extent_node_t *node,
+    bool *gdump);
 void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,

src/arena.c

@@ -568,8 +568,8 @@ arena_chunk_init_spare(arena_t *arena)
 }
 
 static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    size_t sn, bool zero)
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
+    bool *gdump)
 {
 
 	/*
@@ -580,7 +580,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
	 */
 	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
 	extent_node_achunk_set(&chunk->node, true);
-	return (chunk_register(tsdn, chunk, &chunk->node));
+	return (chunk_register(chunk, &chunk->node, gdump));
 }
 
 static arena_chunk_t *
@@ -591,6 +591,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
 	size_t sn;
 
 	malloc_mutex_unlock(tsdn, &arena->lock);
+	witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
 	    NULL, chunksize, chunksize, &sn, zero, commit);
@@ -603,17 +604,21 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
			chunk = NULL;
		}
	}
-	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
-	    *zero)) {
+	if (chunk != NULL) {
+		bool gdump;
+		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
			if (!*commit) {
				/* Undo commit of header. */
-				chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
-				    LG_PAGE, arena->ind);
+				chunk_hooks->decommit(chunk, chunksize, 0,
+				    map_bias << LG_PAGE, arena->ind);
			}
-			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-			    chunksize, sn, *zero, *commit);
+			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+			    (void *)chunk, chunksize, sn, *zero, *commit);
			chunk = NULL;
		}
+		if (config_prof && opt_prof && gdump)
+			prof_gdump(tsdn);
	}
 
 	malloc_mutex_lock(tsdn, &arena->lock);
 	return (chunk);
@@ -627,14 +632,24 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t sn;
 
+	/* prof_gdump() requirement. */
+	witness_assert_lock_depth(tsdn, 1);
+	malloc_mutex_assert_owner(tsdn, &arena->lock);
+
 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
 	    chunksize, &sn, zero, commit, true);
 	if (chunk != NULL) {
-		if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
+		bool gdump;
+
+		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, sn, true);
			return (NULL);
		}
+		if (config_prof && opt_prof && gdump) {
+			malloc_mutex_unlock(tsdn, &arena->lock);
+			prof_gdump(tsdn);
+			malloc_mutex_lock(tsdn, &arena->lock);
+		}
	}
 	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
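One caller cannot defer the dump past its own critical section: arena_chunk_alloc_internal() is entered with arena->lock held (hence the depth-1 witness assertion), so it drops that lock around prof_gdump() and reacquires it, as the hunk above shows. The same move in the earlier sketch's hypothetical terms:

/*
 * Variant for a caller that already holds a lock: drop it around the
 * dump, then retake it (cf. arena_chunk_alloc_internal above).
 * do_dump() is the hypothetical dump routine from the earlier sketch.
 */
static void
maybe_dump_locked(pthread_mutex_t *held, bool gdump)
{
	if (gdump) {
		pthread_mutex_unlock(held);
		do_dump();
		pthread_mutex_lock(held);
	}
}

The trade-off is that state guarded by the dropped lock may change while the dump runs, so the caller must not carry conclusions about that state across the call.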

src/chunk.c

@@ -141,7 +141,7 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 }
 
 bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+chunk_register(const void *chunk, const extent_node_t *node, bool *gdump)
 {
 
 	assert(extent_node_addr_get(node) == chunk);
@@ -160,8 +160,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
			 */
			high = atomic_read_z(&highchunks);
		}
-		if (cur > high && prof_gdump_get_unlocked())
-			prof_gdump(tsdn);
+		*gdump = (cur > high && prof_gdump_get_unlocked());
	}
 
 	return (false);

src/huge.c

@@ -15,20 +15,20 @@ huge_node_get(const void *ptr)
 }
 
 static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
 {
 
 	assert(extent_node_addr_get(node) == ptr);
 	assert(!extent_node_achunk_get(node));
-	return (chunk_register(tsdn, ptr, node));
+	return (chunk_register(ptr, node, gdump));
 }
 
 static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
 {
 	bool err;
 
-	err = huge_node_set(tsdn, ptr, node);
+	err = huge_node_set(tsdn, ptr, node, gdump);
 	assert(!err);
 }
@@ -57,11 +57,12 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	arena_t *iarena;
 	extent_node_t *node;
 	size_t sn;
-	bool is_zeroed;
+	bool is_zeroed, gdump;
 
 	/* Allocate one or more contiguous chunks for this request. */
 
 	assert(!tsdn_null(tsdn) || arena != NULL);
+	witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
 	ausize = sa2u(usize, alignment);
 	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
@@ -91,11 +92,13 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
 
-	if (huge_node_set(tsdn, ret, node)) {
+	if (huge_node_set(tsdn, ret, node, &gdump)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}
+	if (config_prof && opt_prof && gdump)
+		prof_gdump(tsdn);
 
 	/* Insert node into huge. */
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);
@@ -144,7 +147,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	extent_node_t *node;
 	arena_t *arena;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-	bool pre_zeroed, post_zeroed;
+	bool pre_zeroed, post_zeroed, gdump;
 
+	witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
+
 	/* Increase usize to incorporate extra. */
 	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@@ -178,10 +183,13 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	huge_node_unset(ptr, node);
 	assert(extent_node_size_get(node) != usize);
 	extent_node_size_set(node, usize);
-	huge_node_reset(tsdn, ptr, node);
+	huge_node_reset(tsdn, ptr, node, &gdump);
 
 	/* Update zeroed. */
 	extent_node_zeroed_set(node, post_zeroed);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	/* gdump without any locks held. */
+	if (config_prof && opt_prof && gdump)
+		prof_gdump(tsdn);
 
 	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
@@ -207,7 +215,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	arena_t *arena;
 	chunk_hooks_t chunk_hooks;
 	size_t cdiff;
-	bool pre_zeroed, post_zeroed;
+	bool pre_zeroed, post_zeroed, gdump;
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
@@ -215,6 +223,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	chunk_hooks = chunk_hooks_get(tsdn, arena);
 
 	assert(oldsize > usize);
+	witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
 	/* Split excess chunks. */
 	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -241,10 +250,13 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	/* Update the size of the huge allocation. */
 	huge_node_unset(ptr, node);
 	extent_node_size_set(node, usize);
-	huge_node_reset(tsdn, ptr, node);
+	huge_node_reset(tsdn, ptr, node, &gdump);
 
 	/* Update zeroed. */
 	extent_node_zeroed_set(node, post_zeroed);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	/* gdump without any locks held. */
+	if (config_prof && opt_prof && gdump)
+		prof_gdump(tsdn);
 
 	/* Zap the excess chunks. */
 	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
@@ -258,7 +270,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
     size_t usize, bool zero)
 {
 	extent_node_t *node;
 	arena_t *arena;
-	bool is_zeroed_subchunk, is_zeroed_chunk;
+	bool is_zeroed_subchunk, is_zeroed_chunk, gdump;
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
@@ -266,6 +278,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	is_zeroed_subchunk = extent_node_zeroed_get(node);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
+	witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
+
 	/*
	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
	 * update extent's zeroed field, and zero as necessary.
@@ -280,8 +294,11 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	extent_node_size_set(node, usize);
 	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
 	    is_zeroed_chunk);
-	huge_node_reset(tsdn, ptr, node);
+	huge_node_reset(tsdn, ptr, node, &gdump);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	/* gdump without any locks held. */
+	if (config_prof && opt_prof && gdump)
+		prof_gdump(tsdn);
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
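The witness_assert_lock_depth(tsdn, 0) calls added throughout encode the new contract that prof_gdump() is only reached with no locks held (depth 1 in arena_chunk_alloc_internal(), which holds only arena->lock and drops it around the dump). A rough sketch of how such a depth assertion can be built on a per-thread counter; the names here are hypothetical, and this is not jemalloc's actual witness machinery:

#include <assert.h>
#include <pthread.h>

static _Thread_local unsigned lock_depth;	/* hypothetical per-thread counter */

static void
tracked_lock(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	lock_depth++;
}

static void
tracked_unlock(pthread_mutex_t *m)
{
	assert(lock_depth > 0);
	lock_depth--;
	pthread_mutex_unlock(m);
}

/* Fails loudly if a gdump path is reached with unexpected locks held. */
static void
assert_lock_depth(unsigned depth)
{
	assert(lock_depth == depth);
}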