Add witness_assert_depth[_to_rank]().

This makes it possible to assert precisely which locks are held,
optionally restricted to locks at or above a given rank.
Jason Evans 2017-01-21 15:12:03 -08:00
parent 7034e6baa1
commit f56cb9a68e
7 changed files with 141 additions and 107 deletions

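As a usage sketch of the new assertions (a minimal example in the spirit of the unit test changes below; the witnesses, ranks, and the tsdn_fetch() call are illustrative, and, like the existing witness assertions, these only check anything in debug builds):

	witness_t a, b;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_init(&a, "a", (witness_rank_t)1U, NULL);
	witness_init(&b, "b", (witness_rank_t)2U, NULL);

	witness_lock(tsdn, &a);
	witness_lock(tsdn, &b);
	witness_assert_depth(tsdn, 2);                              /* Two locks held in total. */
	witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);  /* Only "b" has rank >= 2. */
	witness_unlock(tsdn, &b);
	witness_unlock(tsdn, &a);
	witness_assert_lockless(tsdn);                              /* Equivalent to depth 0. */
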
include/jemalloc/internal/private_symbols.txt

@@ -615,14 +615,16 @@ valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_lock_depth
witness_assert_depth
witness_assert_depth_to_rank
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_depth_error
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_lock_depth_error
witness_not_owner_error
witness_owner
witness_owner_error

include/jemalloc/internal/witness.h

@@ -12,6 +12,8 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
@@ -92,12 +94,12 @@ extern witness_not_owner_error_t *witness_not_owner_error;
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_lock_depth_error_t)(const witness_list_t *,
unsigned depth);
extern witness_lock_depth_error_t *witness_lock_depth_error;
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *witness_depth_error;
#else
void witness_lock_depth_error(const witness_list_t *witnesses,
unsigned depth);
void witness_depth_error(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth);
#endif
void witnesses_cleanup(tsd_t *tsd);
@@ -114,7 +116,10 @@ void witness_postfork_child(tsd_t *tsd);
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_lock_depth(tsdn_t *tsdn, unsigned depth);
void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth);
void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#endif
@@ -126,6 +131,8 @@ witness_owner(tsd_t *tsd, const witness_t *witness)
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
@@ -178,8 +185,8 @@ witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
}
JEMALLOC_INLINE void
witness_assert_lock_depth(tsdn_t *tsdn, unsigned depth)
{
witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth) {
tsd_t *tsd;
unsigned d;
witness_list_t *witnesses;
@@ -196,12 +203,25 @@ witness_assert_lock_depth(tsdn_t *tsdn, unsigned depth)
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL) {
ql_foreach(w, witnesses, link) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth)
witness_lock_depth_error(witnesses, depth);
witness_depth_error(witnesses, rank_inclusive, depth);
}
JEMALLOC_INLINE void
witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn) {
witness_assert_depth(tsdn, 0);
}
JEMALLOC_INLINE void

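To restate the new inline's semantics: witness_assert_depth_to_rank() walks the thread's witness list from the most recently acquired lock backward, counts entries until it reaches one whose rank is below rank_inclusive, and requires that count to equal depth. witness_assert_depth() is simply the unrestricted case (rank_inclusive == WITNESS_RANK_MIN), and witness_assert_lockless() remains equivalent to asserting depth 0. A worked example with hypothetical witnesses a, b, c of ranks 1, 2, 3:

	/* Currently held, in acquisition order: a(1), b(2), c(3). */
	witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 2); /* b and c qualify. */
	witness_assert_depth_to_rank(tsdn, (witness_rank_t)4U, 0); /* Nothing of rank >= 4. */
	witness_assert_depth(tsdn, 3);                             /* All three locks count. */
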
src/arena.c

@@ -591,7 +591,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock);
witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
NULL, chunksize, chunksize, &sn, zero, commit);
@@ -633,7 +633,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
size_t sn;
/* prof_gdump() requirement. */
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
malloc_mutex_assert_owner(tsdn, &arena->lock);
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,

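The arena.c change tightens rather than relaxes the prof_gdump() requirement: arena_chunk_alloc_internal() runs with exactly arena->lock held (depth 1), while the hard path, which runs after the lock is dropped, must be fully lockless because chunk allocation can end up calling prof_gdump(). A condensed sketch of the pattern, paraphrasing the surrounding 4.x arena code rather than quoting it:

	/* arena_chunk_alloc_internal(): only arena->lock may be held. */
	witness_assert_depth(tsdn, 1);          /* prof_gdump() requirement. */
	malloc_mutex_assert_owner(tsdn, &arena->lock);

	/* arena_chunk_alloc_internal_hard(): drop the lock, then stay lockless. */
	malloc_mutex_unlock(tsdn, &arena->lock);
	witness_assert_lockless(tsdn);          /* prof_gdump() requirement. */
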
src/huge.c

@@ -62,7 +62,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
/* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL);
witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
@@ -149,7 +149,7 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed, gdump;
witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
/* Increase usize to incorporate extra. */
for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@@ -223,7 +223,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
/* Split excess chunks. */
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -278,7 +278,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
witness_assert_lock_depth(tsdn, 0); /* prof_gdump() requirement. */
witness_assert_lockless(tsdn); /* prof_gdump() requirement. */
/*
* Use is_zeroed_chunk to detect whether the trailing memory is zeroed,

src/jemalloc.c

@@ -1582,7 +1582,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
tsd = tsd_fetch();
*tsdn = tsd_tsdn(tsd);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
ind = size2index(size);
if (unlikely(ind >= NSIZES))
@@ -1620,7 +1620,7 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
assert(usize == isalloc(tsdn, ret, config_prof));
*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1705,7 +1705,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
goto label_oom;
}
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0)
size = 1;
@@ -1746,7 +1746,7 @@ label_return:
UTRACE(0, size, result);
JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
false);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
assert(result == NULL);
@@ -1756,7 +1756,7 @@ label_oom:
abort();
}
ret = ENOMEM;
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
goto label_return;
}
@@ -1874,7 +1874,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1902,7 +1902,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1948,7 +1948,7 @@ je_realloc(void *ptr, size_t size)
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind)) {
@@ -1995,7 +1995,7 @@ je_realloc(void *ptr, size_t size)
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
old_usize, old_rzsize, maybe, false);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (ret);
}
@@ -2006,12 +2006,12 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(!malloc_slow))
ifree(tsd, ptr, tcache_get(tsd, false), false);
else
ifree(tsd, ptr, tcache_get(tsd, false), true);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
}
}
@@ -2240,7 +2240,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
tsd = tsd_fetch();
*tsdn = tsd_tsdn(tsd);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(flags == 0)) {
szind_t ind = size2index(size);
@@ -2375,7 +2375,7 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
@@ -2422,7 +2422,7 @@ je_rallocx(void *ptr, size_t size, int flags)
UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
old_usize, old_rzsize, no, zero);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2430,7 +2430,7 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
}
@@ -2526,7 +2526,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
@@ -2567,7 +2567,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
old_usize, old_rzsize, no, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
}
@@ -2582,14 +2582,14 @@ je_sallocx(const void *ptr, int flags)
malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
if (config_ivsalloc)
usize = ivsalloc(tsdn, ptr, config_prof);
else
usize = isalloc(tsdn, ptr, config_prof);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (usize);
}
@@ -2603,7 +2603,7 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2617,7 +2617,7 @@ je_dallocx(void *ptr, int flags)
ifree(tsd, ptr, tcache, false);
else
ifree(tsd, ptr, tcache, true);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_ALWAYS_INLINE_C size_t
@@ -2625,13 +2625,13 @@ inallocx(tsdn_t *tsdn, size_t size, int flags)
{
size_t usize;
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size);
else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (usize);
}
@@ -2648,7 +2648,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
usize = inallocx(tsd_tsdn(tsd), size, flags);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2662,7 +2662,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
isfree(tsd, ptr, usize, tcache, false);
else
isfree(tsd, ptr, usize, tcache, true);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2678,13 +2678,13 @@ je_nallocx(size_t size, int flags)
return (0);
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > HUGE_MAXCLASS))
return (0);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (usize);
}
@@ -2699,9 +2699,9 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
return (EAGAIN);
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
}
@@ -2715,9 +2715,9 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
return (EAGAIN);
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (ret);
}
@@ -2732,9 +2732,9 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
return (EAGAIN);
tsd = tsd_fetch();
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
}
@@ -2745,9 +2745,9 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
tsdn_t *tsdn;
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2760,14 +2760,14 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
malloc_thread_init();
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
if (config_ivsalloc)
ret = ivsalloc(tsdn, ptr, config_prof);
else
ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
return (ret);
}

src/witness.c

@@ -71,16 +71,16 @@ witness_not_owner_error_t *witness_not_owner_error =
#endif
#ifdef JEMALLOC_JET
#undef witness_lock_depth_error
#define witness_lock_depth_error JEMALLOC_N(n_witness_lock_depth_error)
#undef witness_depth_error
#define witness_depth_error JEMALLOC_N(n_witness_depth_error)
#endif
void
witness_lock_depth_error(const witness_list_t *witnesses, unsigned depth)
{
witness_depth_error(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
witness_t *w;
malloc_printf("<jemalloc>: Should own %u lock%s:", depth, (depth != 1) ?
"s" : "");
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
@@ -88,17 +88,16 @@ witness_lock_depth_error(const witness_list_t *witnesses, unsigned depth)
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lock_depth_error
#define witness_lock_depth_error JEMALLOC_N(witness_lock_depth_error)
witness_lock_depth_error_t *witness_lock_depth_error =
JEMALLOC_N(n_witness_lock_depth_error);
#undef witness_depth_error
#define witness_depth_error JEMALLOC_N(witness_depth_error)
witness_depth_error_t *witness_depth_error = JEMALLOC_N(n_witness_depth_error);
#endif
void
witnesses_cleanup(tsd_t *tsd)
{
witness_assert_lock_depth(tsd_tsdn(tsd), 0);
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}

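For context, the reworked error now reports the rank cutoff as well as the expected depth before listing the owned witnesses. With the format string above, a failed witness_assert_lockless() while a single hypothetical witness a of rank 1 is held would print roughly:

	<jemalloc>: Should own 0 locks of rank >= 1: a(1)
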
test/unit/witness.c

@@ -3,12 +3,12 @@
static witness_lock_error_t *witness_lock_error_orig;
static witness_owner_error_t *witness_owner_error_orig;
static witness_not_owner_error_t *witness_not_owner_error_orig;
static witness_lock_depth_error_t *witness_lock_depth_error_orig;
static witness_depth_error_t *witness_depth_error_orig;
static bool saw_lock_error;
static bool saw_owner_error;
static bool saw_not_owner_error;
static bool saw_lock_depth_error;
static bool saw_depth_error;
static void
witness_lock_error_intercept(const witness_list_t *witnesses,
@@ -33,11 +33,9 @@ witness_not_owner_error_intercept(const witness_t *witness)
}
static void
witness_lock_depth_error_intercept(const witness_list_t *witnesses,
unsigned depth)
{
saw_lock_depth_error = true;
witness_depth_error_intercept(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
saw_depth_error = true;
}
static int
@@ -67,25 +65,37 @@ TEST_BEGIN(test_witness)
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
witness_init(&a, "a", 1, NULL);
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 0);
witness_init(&b, "b", 2, NULL);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_assert_lock_depth(tsdn, 2);
witness_assert_depth(tsdn, 2);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 2);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
witness_unlock(tsdn, &a);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
witness_unlock(tsdn, &b);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
}
TEST_END
@@ -98,21 +108,21 @@ TEST_BEGIN(test_witness_comp)
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, witness_comp);
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_init(&b, "b", 1, witness_comp);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_assert_lock_depth(tsdn, 2);
witness_assert_depth(tsdn, 2);
witness_unlock(tsdn, &b);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_lock_error_orig = witness_lock_error;
witness_lock_error = witness_lock_error_intercept;
@@ -124,7 +134,7 @@ TEST_BEGIN(test_witness_comp)
witness_lock(tsdn, &c);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &c);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
saw_lock_error = false;
@@ -134,11 +144,11 @@ TEST_BEGIN(test_witness_comp)
witness_lock(tsdn, &d);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &d);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_unlock(tsdn, &a);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_lock_error = witness_lock_error_orig;
}
@@ -157,22 +167,22 @@ TEST_BEGIN(test_witness_reversal)
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
witness_init(&b, "b", 2, NULL);
witness_lock(tsdn, &b);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
assert_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &a);
witness_assert_lock_depth(tsdn, 1);
witness_assert_depth(tsdn, 1);
witness_unlock(tsdn, &b);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_lock_error = witness_lock_error_orig;
}
@@ -195,7 +205,7 @@ TEST_BEGIN(test_witness_recursive)
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
@@ -208,7 +218,7 @@ TEST_BEGIN(test_witness_recursive)
witness_unlock(tsdn, &a);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_owner_error = witness_owner_error_orig;
witness_lock_error = witness_lock_error_orig;
@@ -229,7 +239,7 @@ TEST_BEGIN(test_witness_unlock_not_owned)
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
@@ -237,41 +247,44 @@ TEST_BEGIN(test_witness_unlock_not_owned)
witness_unlock(tsdn, &a);
assert_true(saw_owner_error, "Expected owner error");
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_owner_error = witness_owner_error_orig;
}
TEST_END
TEST_BEGIN(test_witness_lock_depth)
{
TEST_BEGIN(test_witness_depth) {
witness_t a;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_lock_depth_error_orig = witness_lock_depth_error;
witness_lock_depth_error = witness_lock_depth_error_intercept;
saw_lock_depth_error = false;
witness_depth_error_orig = witness_depth_error;
witness_depth_error = witness_depth_error_intercept;
saw_depth_error = false;
tsdn = tsdn_fetch();
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_init(&a, "a", 1, NULL);
assert_false(saw_lock_depth_error, "Unexpected lock_depth error");
witness_assert_lock_depth(tsdn, 0);
assert_false(saw_depth_error, "Unexpected depth error");
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_lock(tsdn, &a);
witness_assert_lock_depth(tsdn, 0);
assert_true(saw_lock_depth_error, "Expected lock_depth error");
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
assert_true(saw_depth_error, "Expected depth error");
witness_unlock(tsdn, &a);
witness_assert_lock_depth(tsdn, 0);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_lock_depth_error = witness_lock_depth_error_orig;
witness_depth_error = witness_depth_error_orig;
}
TEST_END
@@ -279,11 +292,11 @@ int
main(void)
{
return (test(
return test(
test_witness,
test_witness_comp,
test_witness_reversal,
test_witness_recursive,
test_witness_unlock_not_owned,
test_witness_lock_depth));
test_witness_depth);
}