Fix lock order reversal during gdump.
parent dad74bd3c8
commit b49c649bc1
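The change below stops chunk_register() from calling prof_gdump() directly: prof_gdump() takes the profiler's own locks, and invoking it while arena->lock or huge_mtx is held reversed the established lock order. chunk_register() now reports through a new bool *gdump out-parameter, and each caller fires prof_gdump() only after releasing its locks; witness_assert_lock_depth() assertions document the expected lock depth at each call site. A minimal self-contained sketch of the pattern (names such as chunk_register_sketch, prof_gdump_sketch, and the trigger threshold are illustrative, not jemalloc's API):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t curchunks;

/* Analogue of the patched chunk_register(): record whether a dump is due
 * in *gdump instead of dumping here, where a caller might hold locks. */
static bool
chunk_register_sketch(size_t nadd, bool *gdump)
{
    curchunks += nadd;
    *gdump = (curchunks >= 4);  /* arbitrary trigger for the demo */
    return false;               /* false == success */
}

/* Analogue of prof_gdump(): acquires its own lock, so it must only run
 * while the caller holds no other lock (lock depth 0). */
static void
prof_gdump_sketch(void)
{
    pthread_mutex_lock(&dump_lock);
    printf("dump at %zu chunks\n", curchunks);
    pthread_mutex_unlock(&dump_lock);
}

int
main(void)
{
    bool gdump = false;

    pthread_mutex_lock(&arena_lock);
    /* ... carve out a chunk under arena_lock ... */
    pthread_mutex_unlock(&arena_lock);

    /* The dump decision was recorded; the dump itself is deferred. */
    if (!chunk_register_sketch(4, &gdump) && gdump)
        prof_gdump_sketch();    /* safe: no other lock held here */
    return 0;
}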
include/jemalloc/internal/chunk.h
@@ -52,8 +52,8 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
 
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
-    const extent_node_t *node);
+bool chunk_register(const void *chunk, const extent_node_t *node,
+    bool *gdump);
 void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,

src/arena.c
@@ -568,8 +568,8 @@ arena_chunk_init_spare(arena_t *arena)
 }
 
 static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    size_t sn, bool zero)
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
+    bool *gdump)
 {
 
     /*
@@ -580,7 +580,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
      */
     extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
     extent_node_achunk_set(&chunk->node, true);
-    return (chunk_register(tsdn, chunk, &chunk->node));
+    return (chunk_register(chunk, &chunk->node, gdump));
 }
 
 static arena_chunk_t *
@@ -591,6 +591,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     size_t sn;
 
     malloc_mutex_unlock(tsdn, &arena->lock);
+    witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
     chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
         NULL, chunksize, chunksize, &sn, zero, commit);
@@ -603,16 +604,20 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
             chunk = NULL;
         }
     }
-    if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
-        *zero)) {
-        if (!*commit) {
-            /* Undo commit of header. */
-            chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
-                LG_PAGE, arena->ind);
-        }
-        chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-            chunksize, sn, *zero, *commit);
-        chunk = NULL;
-    }
+    if (chunk != NULL) {
+        bool gdump;
+        if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
+            if (!*commit) {
+                /* Undo commit of header. */
+                chunk_hooks->decommit(chunk, chunksize, 0,
+                    map_bias << LG_PAGE, arena->ind);
+            }
+            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+                (void *)chunk, chunksize, sn, *zero, *commit);
+            chunk = NULL;
+        }
+        if (config_prof && opt_prof && gdump)
+            prof_gdump(tsdn);
+    }
 
     malloc_mutex_lock(tsdn, &arena->lock);
@@ -627,14 +632,24 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
     size_t sn;
 
+    /* prof_gdump() requirement. */
+    witness_assert_lock_depth(tsdn, 1);
+    malloc_mutex_assert_owner(tsdn, &arena->lock);
+
     chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
         chunksize, &sn, zero, commit, true);
     if (chunk != NULL) {
-        if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
+        bool gdump;
+        if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
             chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
                 chunksize, sn, true);
             return (NULL);
         }
+        if (config_prof && opt_prof && gdump) {
+            malloc_mutex_unlock(tsdn, &arena->lock);
+            prof_gdump(tsdn);
+            malloc_mutex_lock(tsdn, &arena->lock);
+        }
     }
     if (chunk == NULL) {
         chunk = arena_chunk_alloc_internal_hard(tsdn, arena,

src/chunk.c
@@ -141,7 +141,7 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 }
 
 bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+chunk_register(const void *chunk, const extent_node_t *node, bool *gdump)
 {
 
     assert(extent_node_addr_get(node) == chunk);
@@ -160,8 +160,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
              */
             high = atomic_read_z(&highchunks);
         }
-        if (cur > high && prof_gdump_get_unlocked())
-            prof_gdump(tsdn);
+        *gdump = (cur > high && prof_gdump_get_unlocked());
     }
 
     return (false);

src/huge.c
@@ -15,20 +15,20 @@ huge_node_get(const void *ptr)
 }
 
 static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
 {
 
     assert(extent_node_addr_get(node) == ptr);
     assert(!extent_node_achunk_get(node));
-    return (chunk_register(tsdn, ptr, node));
+    return (chunk_register(ptr, node, gdump));
 }
 
 static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
 {
     bool err;
 
-    err = huge_node_set(tsdn, ptr, node);
+    err = huge_node_set(tsdn, ptr, node, gdump);
     assert(!err);
 }
 
@@ -57,11 +57,12 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     arena_t *iarena;
     extent_node_t *node;
     size_t sn;
-    bool is_zeroed;
+    bool is_zeroed, gdump;
 
     /* Allocate one or more contiguous chunks for this request. */
 
     assert(!tsdn_null(tsdn) || arena != NULL);
+    witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
     ausize = sa2u(usize, alignment);
     if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
@@ -91,11 +92,13 @@
 
     extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
 
-    if (huge_node_set(tsdn, ret, node)) {
+    if (huge_node_set(tsdn, ret, node, &gdump)) {
         arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
         idalloctm(tsdn, node, NULL, true, true);
         return (NULL);
     }
+    if (config_prof && opt_prof && gdump)
+        prof_gdump(tsdn);
 
     /* Insert node into huge. */
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
@@ -144,7 +147,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
     extent_node_t *node;
     arena_t *arena;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    bool pre_zeroed, post_zeroed;
+    bool pre_zeroed, post_zeroed, gdump;
 
+    witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
+
     /* Increase usize to incorporate extra. */
     for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@@ -178,10 +183,13 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
     huge_node_unset(ptr, node);
     assert(extent_node_size_get(node) != usize);
     extent_node_size_set(node, usize);
-    huge_node_reset(tsdn, ptr, node);
+    huge_node_reset(tsdn, ptr, node, &gdump);
     /* Update zeroed. */
     extent_node_zeroed_set(node, post_zeroed);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+    /* gdump without any locks held. */
+    if (config_prof && opt_prof && gdump)
+        prof_gdump(tsdn);
 
     arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
 
@@ -207,7 +215,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
     arena_t *arena;
     chunk_hooks_t chunk_hooks;
     size_t cdiff;
-    bool pre_zeroed, post_zeroed;
+    bool pre_zeroed, post_zeroed, gdump;
 
     node = huge_node_get(ptr);
     arena = extent_node_arena_get(node);
@@ -215,6 +223,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
     chunk_hooks = chunk_hooks_get(tsdn, arena);
 
     assert(oldsize > usize);
+    witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
 
     /* Split excess chunks. */
     cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -241,10 +250,13 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
     /* Update the size of the huge allocation. */
     huge_node_unset(ptr, node);
     extent_node_size_set(node, usize);
-    huge_node_reset(tsdn, ptr, node);
+    huge_node_reset(tsdn, ptr, node, &gdump);
     /* Update zeroed. */
     extent_node_zeroed_set(node, post_zeroed);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+    /* gdump without any locks held. */
+    if (config_prof && opt_prof && gdump)
+        prof_gdump(tsdn);
 
     /* Zap the excess chunks. */
     arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
@@ -258,7 +270,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
     size_t usize, bool zero) {
     extent_node_t *node;
     arena_t *arena;
-    bool is_zeroed_subchunk, is_zeroed_chunk;
+    bool is_zeroed_subchunk, is_zeroed_chunk, gdump;
 
     node = huge_node_get(ptr);
     arena = extent_node_arena_get(node);
@@ -266,6 +278,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
     is_zeroed_subchunk = extent_node_zeroed_get(node);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
+    witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
+
     /*
      * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
      * update extent's zeroed field, and zero as necessary.
@@ -280,8 +294,11 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
     extent_node_size_set(node, usize);
     extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
         is_zeroed_chunk);
-    huge_node_reset(tsdn, ptr, node);
+    huge_node_reset(tsdn, ptr, node, &gdump);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+    /* gdump without any locks held. */
+    if (config_prof && opt_prof && gdump)
+        prof_gdump(tsdn);
 
     if (zero || (config_fill && unlikely(opt_zero))) {
         if (!is_zeroed_subchunk) {
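The witness_assert_lock_depth() calls added above encode the prerequisite in checkable form: prof_gdump() call sites must hold no locks (depth 0), except arena_chunk_alloc_internal(), which still owns arena->lock (depth 1) and unlocks around the dump. A rough sketch of such a depth assertion, with assumed names (jemalloc's actual witness machinery is more elaborate):

#include <assert.h>

/* A per-thread count of held locks; real lock/unlock wrappers would
 * increment and decrement it. */
static _Thread_local int lock_depth;

static void
witness_assert_lock_depth_sketch(int depth)
{
    assert(lock_depth == depth); /* e.g. 0 right before a deferred dump */
}

int
main(void)
{
    witness_assert_lock_depth_sketch(0); /* holds: no wrapped lock taken */
    return 0;
}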