Header refactoring: unify and de-catchall witness code.

David Goldblatt
2017-05-22 19:32:04 -07:00
committed by David Goldblatt
parent 36195c8f4d
commit 9f822a1fd7
18 changed files with 602 additions and 538 deletions
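
In short: the four witness headers (witness_types.h, witness_structs.h, witness_externs.h, witness_inlines.h) collapse into a single jemalloc/internal/witness.h, and the witness API stops taking the allocator-wide tsdn_t * directly. Witness state moves into its own witness_tsd_t / witness_tsdn_t pair, and call sites convert through the generated tsdn_witness_tsdp_get() getter. The recurring call-site change is mechanical; a before/after fragment for illustration only, assuming jemalloc's internal headers and an in-scope tsdn and mutex, exactly as in the hunks below:

/* Before this commit: witness calls received the allocator-wide tsdn_t *. */
witness_assert_not_owner(tsdn, &mutex->witness);
witness_lock(tsdn, &mutex->witness);

/* After: only the witness-specific, nullable slice of TSD crosses the boundary. */
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);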

View File

@@ -40,7 +40,6 @@
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/witness_types.h"
#include "jemalloc/internal/mutex_types.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_dss_types.h"
@@ -54,7 +53,6 @@
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/witness_structs.h"
#include "jemalloc/internal/mutex_structs.h"
#include "jemalloc/internal/mutex_pool_structs.h"
#include "jemalloc/internal/arena_structs_a.h"
@@ -72,7 +70,6 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/witness_externs.h"
#include "jemalloc/internal/mutex_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/extent_dss_externs.h"
@@ -89,7 +86,6 @@
/* INLINES */
/******************************************************************************/
#include "jemalloc/internal/witness_inlines.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/mutex_pool_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"

View File

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_INLINES_C_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/witness.h"
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
@@ -25,7 +26,8 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
assert(size != 0);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {
@@ -49,7 +51,8 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
assert(usize == sa2u(usize, alignment));
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -82,7 +85,8 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
@@ -100,7 +104,8 @@ idalloc(tsd_t *tsd, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}
@@ -108,7 +113,8 @@ JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
void *p;
size_t usize, copysize;
@@ -146,7 +152,8 @@ iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
@@ -174,7 +181,8 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {

View File

@@ -31,14 +31,14 @@ mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn, &mutex->witness);
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
return true;
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn, &mutex->witness);
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
return false;
}
@@ -69,19 +69,19 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn, &mutex->witness);
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
malloc_mutex_lock_slow(mutex);
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn, &mutex->witness);
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_unlock(tsdn, &mutex->witness);
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
MALLOC_MUTEX_UNLOCK(mutex);
}
@@ -89,12 +89,12 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_owner(tsdn, &mutex->witness);
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn, &mutex->witness);
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/mutex_pool_structs.h"
#include "jemalloc/internal/witness.h"
/*
* This file really combines "inlines" and "externs", but only transitionally.

View File

@@ -3,8 +3,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/witness_types.h"
#include "jemalloc/internal/witness_structs.h"
#include "jemalloc/internal/witness.h"
struct malloc_mutex_s {
union {

View File

@@ -10,8 +10,7 @@
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness_types.h"
#include "jemalloc/internal/witness_structs.h"
#include "jemalloc/internal/witness.h"
/*
* Thread-Specific-Data layout
@@ -52,30 +51,29 @@
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
# define MALLOC_TEST_TSD \
O(test_data, int) \
O(test_callback, test_callback_t)
O(test_data, int, int) \
O(test_callback, test_callback_t, int)
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
# define MALLOC_TEST_TSD
# define MALLOC_TEST_TSD_INITIALIZER
#endif
/* O(name, type, nullable type) */
#define MALLOC_TSD \
/* O(name, type) */ \
O(tcache_enabled, bool) \
O(arenas_tdata_bypass, bool) \
O(reentrancy_level, int8_t) \
O(narenas_tdata, uint32_t) \
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t) \
O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(tcache, tcache_t) \
O(witnesses, witness_list_t) \
O(witness_fork, bool) \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(tcache, tcache_t, tcache_t) \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
#define TSD_INITIALIZER { \
@@ -92,8 +90,7 @@ typedef void (*test_callback_t)(int *);
NULL, \
NULL, \
TCACHE_ZERO_INITIALIZER, \
ql_head_initializer(witnesses), \
false \
WITNESS_TSD_INITIALIZER \
MALLOC_TEST_TSD_INITIALIZER \
}
@@ -119,7 +116,7 @@ struct tsd_s {
* setters below.
*/
tsd_state_t state;
#define O(n, t) \
#define O(n, t, nt) \
t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
@@ -135,6 +132,22 @@ struct tsdn_s {
tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
return (tsdn_t *)tsd;
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
return tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
assert(!tsdn_null(tsdn));
return &tsdn->tsd;
}
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
@@ -166,7 +179,7 @@ void tsd_slow_update(tsd_t *tsd);
* foo. This omits some safety checks, and so can be used during tsd
* initialization and cleanup.
*/
#define O(n, t) \
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->use_a_getter_or_setter_instead_##n; \
@@ -175,7 +188,7 @@ MALLOC_TSD
#undef O
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t) \
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
assert(tsd->state == tsd_state_nominal || \
@@ -186,8 +199,24 @@ tsd_##n##p_get(tsd_t *tsd) { \
MALLOC_TSD
#undef O
/*
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE nt * \
tsdn_##n##p_get(tsdn_t *tsdn) { \
if (tsdn_null(tsdn)) { \
return NULL; \
} \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t) \
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return *tsd_##n##p_get(tsd); \
@@ -196,7 +225,7 @@ MALLOC_TSD
#undef O
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t) \
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t val) { \
*tsd_##n##p_get(tsd) = val; \
@@ -243,11 +272,6 @@ tsd_fetch(void) {
return tsd_fetch_impl(true);
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
return (tsdn_t *)tsd;
}
static inline bool
tsd_nominal(tsd_t *tsd) {
return (tsd->state <= tsd_state_nominal_max);
@@ -262,18 +286,6 @@ tsdn_fetch(void) {
return tsd_tsdn(tsd_fetch_impl(false));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
return tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
assert(!tsdn_null(tsdn));
return &tsdn->tsd;
}
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
return tsd_rtree_ctxp_get(tsd);

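For the witness slot, O(witness_tsd, witness_tsd_t, witness_tsdn_t), the new three-argument tsdn_##n##p_get block above generates the getter the rest of the diff relies on. A hand expansion for illustration (not a literal line from the commit):

/* Roughly what the macro emits: NULL-safe access to the witness slice of TSD. */
JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
tsdn_witness_tsdp_get(tsdn_t *tsdn) {
	if (tsdn_null(tsdn)) {
		return NULL;	/* Propagate "no TSD yet" to the witness layer. */
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	return (witness_tsdn_t *)tsd_witness_tsdp_get(tsd);
}
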
View File

@@ -0,0 +1,345 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_H
#define JEMALLOC_INTERNAL_WITNESS_H
#include "jemalloc/internal/ql.h"
/******************************************************************************/
/* LOCK RANKS */
/******************************************************************************/
/*
* Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
* machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_GCTX 9U
#define WITNESS_RANK_BACKGROUND_THREAD 10U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 11U
#define WITNESS_RANK_DECAY 11U
#define WITNESS_RANK_TCACHE_QL 12U
#define WITNESS_RANK_EXTENTS 13U
#define WITNESS_RANK_EXTENT_FREELIST 14U
#define WITNESS_RANK_EXTENT_POOL 15U
#define WITNESS_RANK_RTREE 16U
#define WITNESS_RANK_BASE 17U
#define WITNESS_RANK_ARENA_LARGE 18U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
/******************************************************************************/
/* PER-WITNESS DATA */
/******************************************************************************/
#if defined(JEMALLOC_DEBUG)
# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
#else
# define WITNESS_INITIALIZER(name, rank)
#endif
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
void *);
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the same comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Opaque data, passed to comp(). */
void *opaque;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
/******************************************************************************/
/* PER-THREAD DATA */
/******************************************************************************/
typedef struct witness_tsd_s witness_tsd_t;
struct witness_tsd_s {
witness_list_t witnesses;
bool forking;
};
#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
/******************************************************************************/
/* (PER-THREAD) NULLABILITY HELPERS */
/******************************************************************************/
typedef struct witness_tsdn_s witness_tsdn_t;
struct witness_tsdn_s {
witness_tsd_t witness_tsd;
};
JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
return (witness_tsdn_t *)witness_tsd;
}
JEMALLOC_ALWAYS_INLINE bool
witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
return witness_tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE witness_tsd_t *
witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
assert(!witness_tsdn_null(witness_tsdn));
return &witness_tsdn->witness_tsd;
}
/******************************************************************************/
/* API */
/******************************************************************************/
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp, void *opaque);
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
void witnesses_cleanup(witness_tsd_t *witness_tsd);
void witness_prefork(witness_tsd_t *witness_tsd);
void witness_postfork_parent(witness_tsd_t *witness_tsd);
void witness_postfork_child(witness_tsd_t *witness_tsd);
/* Helper, not intended for direct use. */
static inline bool
witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
if (w == witness) {
return true;
}
}
return false;
}
static inline void
witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
witness_tsd_t *witness_tsd;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
if (witness_owner(witness_tsd, witness)) {
return;
}
witness_owner_error(witness);
}
static inline void
witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
const witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
if (w == witness) {
witness_not_owner_error(witness);
}
}
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
witness_rank_t rank_inclusive, unsigned depth) {
witness_tsd_t *witness_tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
d = 0;
witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
}
}
static inline void
witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
}
static inline void
witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
witness_assert_depth(witness_tsdn, 0);
}
static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witness_assert_not_owner(witness_tsdn, witness);
witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (witness_tsd->forking && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
static inline void
witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
/*
* Check whether owner before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
if (witness_owner(witness_tsd, witness)) {
witnesses = &witness_tsd->witnesses;
ql_remove(witnesses, witness, link);
} else {
witness_assert_owner(witness_tsdn, witness);
}
}
#endif /* JEMALLOC_INTERNAL_WITNESS_H */
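
Because the consolidated header no longer reaches into tsd, witness state can be exercised with nothing more than a local witness_tsd_t, which is why witness_prefork() and friends above now take witness_tsd_t * directly. A minimal smoke-test sketch under that assumption (debug build, so config_debug is true; hypothetical code, not part of this commit):

/* Hypothetical standalone exercise of the decoupled witness API. */
static witness_t w;
static witness_tsd_t wtsd = WITNESS_TSD_INITIALIZER;

static void
witness_smoke(void) {
	witness_tsdn_t *wtsdn = witness_tsd_tsdn(&wtsd);

	witness_init(&w, "smoke", WITNESS_RANK_LEAF, NULL, NULL);
	witness_assert_lockless(wtsdn);		/* Nothing held yet. */

	witness_lock(wtsdn, &w);		/* Recorded in wtsd.witnesses. */
	witness_assert_owner(wtsdn, &w);
	witness_assert_depth(wtsdn, 1);

	witness_unlock(wtsdn, &w);
	witness_assert_lockless(wtsdn);
}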

View File

@@ -1,25 +0,0 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_EXTERNS_H
#define JEMALLOC_INTERNAL_WITNESS_EXTERNS_H
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp, void *opaque);
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
void witnesses_cleanup(tsd_t *tsd);
void witness_prefork(tsd_t *tsd);
void witness_postfork_parent(tsd_t *tsd);
void witness_postfork_child(tsd_t *tsd);
#endif /* JEMALLOC_INTERNAL_WITNESS_EXTERNS_H */

View File

@@ -1,188 +0,0 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_INLINES_H
#define JEMALLOC_INTERNAL_WITNESS_INLINES_H
#include "jemalloc/internal/ql.h"
/* Helper, not intended for direct use. */
static inline bool
witness_owner(tsd_t *tsd, const witness_t *witness) {
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness) {
return true;
}
}
return false;
}
static inline void
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) {
tsd_t *tsd;
if (!config_debug) {
return;
}
if (tsdn_null(tsdn)) {
return;
}
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
if (witness_owner(tsd, witness)) {
return;
}
witness_owner_error(witness);
}
static inline void
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (tsdn_null(tsdn)) {
return;
}
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness) {
witness_not_owner_error(witness);
}
}
}
static inline void
witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth) {
tsd_t *tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (tsdn_null(tsdn)) {
return;
}
tsd = tsdn_tsd(tsdn);
d = 0;
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
}
}
static inline void
witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
}
static inline void
witness_assert_lockless(tsdn_t *tsdn) {
witness_assert_depth(tsdn, 0);
}
static inline void
witness_lock(tsdn_t *tsdn, witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug) {
return;
}
if (tsdn_null(tsdn)) {
return;
}
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
witness_assert_not_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
static inline void
witness_unlock(tsdn_t *tsdn, witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
if (!config_debug) {
return;
}
if (tsdn_null(tsdn)) {
return;
}
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT) {
return;
}
/*
* Check whether owner before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
if (witness_owner(tsd, witness)) {
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
} else {
witness_assert_owner(tsdn, witness);
}
}
#endif /* JEMALLOC_INTERNAL_WITNESS_INLINES_H */

View File

@@ -1,28 +0,0 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_STRUCTS_H
#define JEMALLOC_INTERNAL_WITNESS_STRUCTS_H
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the samp comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Opaque data, passed to comp(). */
void *opaque;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
#endif /* JEMALLOC_INTERNAL_WITNESS_STRUCTS_H */

View File

@@ -1,70 +0,0 @@
#ifndef JEMALLOC_INTERNAL_WITNESS_TYPES_H
#define JEMALLOC_INTERNAL_WITNESS_TYPES_H
#include "jemalloc/internal/ql.h"
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
void *);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_GCTX 9U
#define WITNESS_RANK_BACKGROUND_THREAD 10U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 11U
#define WITNESS_RANK_DECAY 11U
#define WITNESS_RANK_TCACHE_QL 12U
#define WITNESS_RANK_EXTENTS 13U
#define WITNESS_RANK_EXTENT_FREELIST 14U
#define WITNESS_RANK_EXTENT_POOL 15U
#define WITNESS_RANK_RTREE 16U
#define WITNESS_RANK_BASE 17U
#define WITNESS_RANK_ARENA_LARGE 18U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#if defined(JEMALLOC_DEBUG)
# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
#else
# define WITNESS_INITIALIZER(name, rank)
#endif
#endif /* JEMALLOC_INTERNAL_WITNESS_TYPES_H */