Relax witness assertions related to prof_gdump().

In some cases the prof machinery allocates (in order to modify the
bt2gctx hash table), and such operations are synchronized via
bt2gctx_mtx.  Rather than asserting that no locks are held on entry
into functions that may call prof_gdump(), make the weaker assertion
that no "core" locks are held.  The prof machinery enqueues dumps
triggered by prof_gdump() calls when bt2gctx_mtx is held, so this
weakened assertion avoids false failures in such cases.
Author: Jason Evans
Date:   2017-02-22 20:58:42 -08:00
Commit: 08c24e7c1a
Parent: f56cb9a68e

3 changed files with 19 additions and 6 deletions
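
To make the relaxation concrete, below is a minimal standalone sketch, not jemalloc's actual witness implementation: the lock list, the held flags, and the value used for WITNESS_RANK_PROF_BT2GCTX are illustrative assumptions, while WITNESS_RANK_CORE and WITNESS_RANK_ARENA are the values added by this patch. A lockless-style assertion counts every held lock, so it would fire on a gdump path that holds bt2gctx_mtx; a depth-to-rank check counts only locks ranked at or above WITNESS_RANK_CORE, so the same path passes.

#include <assert.h>
#include <stddef.h>

/* WITNESS_RANK_CORE/ARENA are from the patch; the BT2GCTX value is assumed. */
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_CORE 9U
#define WITNESS_RANK_ARENA 9U

typedef struct {
        const char *name;
        unsigned rank;
        int held;
} witness_model_t;

/* Count held locks whose rank is at or above the (inclusive) rank boundary. */
static unsigned
held_depth_to_rank(const witness_model_t *w, size_t n, unsigned rank_inclusive)
{
        unsigned depth = 0;

        for (size_t i = 0; i < n; i++) {
                if (w[i].held && w[i].rank >= rank_inclusive)
                        depth++;
        }
        return depth;
}

int
main(void)
{
        /* State on a gdump-triggering path: a prof lock is held, no arena lock. */
        witness_model_t locks[] = {
                {"bt2gctx_mtx", WITNESS_RANK_PROF_BT2GCTX, 1},
                {"arena->lock", WITNESS_RANK_ARENA, 0}
        };

        /* One lock is held in total, so a "no locks held" assertion would abort. */
        assert(held_depth_to_rank(locks, 2, 0) == 1);

        /* Counting only core locks (rank >= 9U), the expected depth of 0 holds. */
        assert(held_depth_to_rank(locks, 2, WITNESS_RANK_CORE) == 0);
        return 0;
}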

--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -25,6 +25,14 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
 #define WITNESS_RANK_PROF_TDATA 7U
 #define WITNESS_RANK_PROF_GCTX 8U
 
+/*
+ * Used as an argument to witness_assert_depth_to_rank() in order to validate
+ * depth excluding non-core locks with lower ranks.  Since the rank argument to
+ * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
+ * definition can have the same value as the minimally ranked core lock.
+ */
+#define WITNESS_RANK_CORE 9U
+
 #define WITNESS_RANK_ARENA 9U
 #define WITNESS_RANK_ARENA_CHUNKS 10U
 #define WITNESS_RANK_ARENA_NODE_CACHE 11U
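
A small hedged illustration of the inclusive-boundary point made in the comment above; counts_as_core() is a stand-in predicate, not part of jemalloc. Because the comparison is inclusive (rank >= WITNESS_RANK_CORE), the boundary can share the value 9U with the lowest-ranked core lock, WITNESS_RANK_ARENA, while lower-ranked prof locks remain excluded from the depth count.

#include <assert.h>

#define WITNESS_RANK_PROF_GCTX 8U
#define WITNESS_RANK_CORE 9U
#define WITNESS_RANK_ARENA 9U

/* Stand-in predicate: does a lock of rank r count toward the core depth? */
static int
counts_as_core(unsigned r)
{
        return r >= WITNESS_RANK_CORE;
}

int
main(void)
{
        assert(counts_as_core(WITNESS_RANK_ARENA));      /* same value, still counted */
        assert(!counts_as_core(WITNESS_RANK_PROF_GCTX)); /* rank 8U < 9U, excluded */
        return 0;
}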

--- a/src/arena.c
+++ b/src/arena.c
@@ -591,7 +591,8 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
 	size_t sn;
 
 	malloc_mutex_unlock(tsdn, &arena->lock);
-	witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
+	/* prof_gdump() requirement. */
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
 	    NULL, chunksize, chunksize, &sn, zero, commit);
@@ -633,7 +634,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
 	size_t sn;
 
 	/* prof_gdump() requirement. */
-	witness_assert_depth(tsdn, 1);
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
 	malloc_mutex_assert_owner(tsdn, &arena->lock);
 
 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,

--- a/src/huge.c
+++ b/src/huge.c
@@ -62,7 +62,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	/* Allocate one or more contiguous chunks for this request. */
 	assert(!tsdn_null(tsdn) || arena != NULL);
 
-	witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
+	/* prof_gdump() requirement. */
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
 	ausize = sa2u(usize, alignment);
 	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
@@ -149,7 +150,8 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	bool pre_zeroed, post_zeroed, gdump;
 
-	witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
+	/* prof_gdump() requirement. */
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
 	/* Increase usize to incorporate extra. */
 	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@@ -223,7 +225,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	chunk_hooks = chunk_hooks_get(tsdn, arena);
 	assert(oldsize > usize);
 
-	witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
+	/* prof_gdump() requirement. */
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
 	/* Split excess chunks. */
 	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -278,7 +281,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	is_zeroed_subchunk = extent_node_zeroed_get(node);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
-	witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
+	/* prof_gdump() requirement. */
+	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
 	/*
 	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,