Header refactoring: unify and de-catchall mutex module

David Goldblatt 2017-05-23 12:28:19 -07:00 committed by David Goldblatt
parent 9f822a1fd7
commit 18ecbfa89e
34 changed files with 304 additions and 287 deletions

View File

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"

View File

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */

View File

@@ -1,8 +1,9 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rb.h"
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;

View File

@@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool_inlines.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/ph.h"

View File

@@ -40,7 +40,6 @@
/* TYPES */
/******************************************************************************/
-#include "jemalloc/internal/mutex_types.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_dss_types.h"
#include "jemalloc/internal/base_types.h"
@@ -53,7 +52,6 @@
/* STRUCTS */
/******************************************************************************/
-#include "jemalloc/internal/mutex_structs.h"
#include "jemalloc/internal/mutex_pool_structs.h"
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
@@ -70,7 +68,6 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/mutex_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/extent_dss_externs.h"
#include "jemalloc/internal/extent_mmap_externs.h"
@@ -86,7 +83,6 @@
/* INLINES */
/******************************************************************************/
-#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/mutex_pool_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/rtree_inlines.h"
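The effect of dropping the four phased mutex headers from this catch-all is visible in every consumer below: a file that needs the mutex API now names that dependency itself rather than inheriting it from jemalloc_internal_includes.h. A minimal before/after sketch, not part of the diff (the consumer file is hypothetical; the include paths are the ones used in this commit):

/* Before: the catch-all pulled in mutex_{types,structs,externs,inlines}.h in phases. */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/* After: the consumer states its own dependency on the unified header. */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/mutex.h"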

View File

@@ -0,0 +1,248 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
typedef enum {
/* Can only acquire one mutex of a given witness rank at a time. */
malloc_mutex_rank_exclusive,
/*
* Can acquire multiple mutexes of the same witness rank, but in
* address-ascending order only.
*/
malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
union {
struct {
/*
* prof_data is defined first to reduce cacheline
* bouncing: the data is not touched by the mutex holder
* during unlocking, while might be modified by
* contenders. Having it before the mutex itself could
* avoid prefetching a modified cacheline (for the
* unlocking thread).
*/
mutex_prof_data_t prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
/*
* We only touch witness when configured w/ debug. However we
* keep the field in a union when !debug so that we don't have
* to pollute the code base with #ifdefs, while avoid paying the
* memory cost.
*/
#if !defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#if defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
/*
* Based on benchmark results, a fixed spin with this amount of retries works
* well for our critical sections.
*/
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
MALLOC_MUTEX_LOCK(mutex);
}
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
return MALLOC_MUTEX_TRYLOCK(mutex);
}
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
if (config_stats) {
mutex_prof_data_t *data = &mutex->prof_data;
data->n_lock_ops++;
if (data->prev_owner != tsdn) {
data->prev_owner = tsdn;
data->n_owner_switches++;
}
}
}
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
return true;
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
return false;
}
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
}
sum->n_wait_times += data->n_wait_times;
sum->n_spin_acquired += data->n_spin_acquired;
if (sum->max_n_thds < data->max_n_thds) {
sum->max_n_thds = data->max_n_thds;
}
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
ATOMIC_RELAXED);
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
&data->n_waiting_thds, ATOMIC_RELAXED);
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
ATOMIC_RELAXED);
sum->n_owner_switches += data->n_owner_switches;
sum->n_lock_ops += data->n_lock_ops;
}
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
malloc_mutex_lock_slow(mutex);
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
MALLOC_MUTEX_UNLOCK(mutex);
}
}
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
mutex_prof_data_t *source = &mutex->prof_data;
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);
/*
* Not *really* allowed (we shouldn't be doing non-atomic loads of
* atomic data), but the mutex protection makes this safe, and writing
* a member-for-member copy is tedious for this situation.
*/
*data = *source;
/* n_wait_thds is not reported (modified w/o locking). */
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
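Taken together, the new header carries everything a caller needs in one place: the struct, the platform lock macros, the initializers, the extern declarations, and the inline lock/unlock/trylock fast paths. A minimal usage sketch based on the declarations above, not part of the diff (the demo_* names and the rank choice are illustrative; malloc_mutex_init is assumed to follow jemalloc's usual true-on-failure convention):

#include "jemalloc/internal/mutex.h"

static malloc_mutex_t demo_mtx;

static bool
demo_boot(void) {
	/* Rank and lock order feed the witness lock-order checking. */
	return malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive);
}

static void
demo_op(tsdn_t *tsdn) {
	/* Trylock is inverted: false means the lock was acquired. */
	if (!malloc_mutex_trylock(tsdn, &demo_mtx)) {
		malloc_mutex_unlock(tsdn, &demo_mtx);
		return;
	}
	/* Contended: fall back to the blocking path. */
	malloc_mutex_lock(tsdn, &demo_mtx);
	malloc_mutex_unlock(tsdn, &demo_mtx);
}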

View File

@@ -1,21 +0,0 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_EXTERNS_H
#define JEMALLOC_INTERNAL_MUTEX_EXTERNS_H
#include "jemalloc/internal/tsd_types.h"
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif /* JEMALLOC_INTERNAL_MUTEX_EXTERNS_H */

View File

@@ -1,118 +0,0 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_INLINES_H
#define JEMALLOC_INTERNAL_MUTEX_INLINES_H
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
MALLOC_MUTEX_LOCK(mutex);
}
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
return MALLOC_MUTEX_TRYLOCK(mutex);
}
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
if (config_stats) {
mutex_prof_data_t *data = &mutex->prof_data;
data->n_lock_ops++;
if (data->prev_owner != tsdn) {
data->prev_owner = tsdn;
data->n_owner_switches++;
}
}
}
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
return true;
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
return false;
}
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
}
sum->n_wait_times += data->n_wait_times;
sum->n_spin_acquired += data->n_spin_acquired;
if (sum->max_n_thds < data->max_n_thds) {
sum->max_n_thds = data->max_n_thds;
}
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
ATOMIC_RELAXED);
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
&data->n_waiting_thds, ATOMIC_RELAXED);
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
ATOMIC_RELAXED);
sum->n_owner_switches += data->n_owner_switches;
sum->n_lock_ops += data->n_lock_ops;
}
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
malloc_mutex_lock_slow(mutex);
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
if (isthreaded) {
MALLOC_MUTEX_UNLOCK(mutex);
}
}
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
malloc_mutex_t *mutex) {
mutex_prof_data_t *source = &mutex->prof_data;
/* Can only read holding the mutex. */
malloc_mutex_assert_owner(tsdn, mutex);
/*
* Not *really* allowed (we shouldn't be doing non-atomic loads of
* atomic data), but the mutex protection makes this safe, and writing
* a member-for-member copy is tedious for this situation.
*/
*data = *source;
/* n_wait_thds is not reported (modified w/o locking). */
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */

View File

@@ -2,7 +2,7 @@
#define JEMALLOC_INTERNAL_MUTEX_POOL_INLINES_H
#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/mutex_inlines.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool_structs.h"
#include "jemalloc/internal/witness.h"

View File

@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_STRUCTS_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_STRUCTS_H
+#include "jemalloc/internal/mutex.h"
/* This file really combines "structs" and "types", but only transitionally. */
/* We do mod reductions by this value, so it should be kept a power of 2. */

View File

@@ -1,55 +0,0 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
#define JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/witness.h"
struct malloc_mutex_s {
union {
struct {
/*
* prof_data is defined first to reduce cacheline
* bouncing: the data is not touched by the mutex holder
* during unlocking, while might be modified by
* contenders. Having it before the mutex itself could
* avoid prefetching a modified cacheline (for the
* unlocking thread).
*/
mutex_prof_data_t prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
/*
* We only touch witness when configured w/ debug. However we
* keep the field in a union when !debug so that we don't have
* to pollute the code base with #ifdefs, while avoid paying the
* memory cost.
*/
#if !defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#if defined(JEMALLOC_DEBUG)
witness_t witness;
malloc_mutex_lock_order_t lock_order;
#endif
};
#endif /* JEMALLOC_INTERNAL_MUTEX_STRUCTS_H */

View File

@@ -1,71 +0,0 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_TYPES_H
#define JEMALLOC_INTERNAL_MUTEX_TYPES_H
typedef struct malloc_mutex_s malloc_mutex_t;
typedef enum {
/* Can only acquire one mutex of a given witness rank at a time. */
malloc_mutex_rank_exclusive,
/*
* Can acquire multiple mutexes of the same witness rank, but in
* address-ascending order only.
*/
malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
/*
* Based on benchmark results, a fixed spin with this amount of retries works
* well for our critical sections.
*/
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
#endif /* JEMALLOC_INTERNAL_MUTEX_TYPES_H */

View File

@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
+#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t bt2gctx_mtx;
extern bool opt_prof;

View File

@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
+#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
cassert(config_prof);

View File

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"

View File

@@ -2,7 +2,7 @@
#define JEMALLOC_INTERNAL_RTREE_STRUCTS_H
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex_pool_structs.h"
+#include "jemalloc/internal/mutex.h"
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */

View File

@@ -3,9 +3,9 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/mutex_types.h"
-#include "jemalloc/internal/mutex_structs.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats_tsd.h"
/* The opt.stats_print storage. */
extern bool opt_stats_print;
@@ -26,14 +26,6 @@ typedef atomic_u64_t arena_stats_u64_t;
typedef uint64_t arena_stats_u64_t;
#endif
-typedef struct tcache_bin_stats_s {
-/*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
-uint64_t nrequests;
-} tcache_bin_stats_t;
typedef struct malloc_bin_stats_s {
/*
 * Total number of allocation/deallocation requests served directly by

View File

@@ -0,0 +1,12 @@
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H
typedef struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
} tcache_bin_stats_t;
#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */

View File

@@ -3,7 +3,7 @@
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/stats_tsd.h"
#include "jemalloc/internal/ticker.h"
/*

View File

@@ -10,11 +10,8 @@ struct tsd_init_block_s {
void *data;
};
+/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;
-struct tsd_init_head_s {
-ql_head(tsd_init_block_t) blocks;
-malloc_mutex_t lock;
-};
typedef struct {
bool initialized;
View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
/******************************************************************************/
/* Data. */

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/mutex.h"
/******************************************************************************/
/* Data. */

View File

@@ -7,6 +7,7 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/ticker.h"

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/

View File

@@ -3,6 +3,8 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+#include "jemalloc/internal/mutex.h"
bool
mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {

View File

@@ -6,6 +6,7 @@
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
/******************************************************************************/

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
/*
 * Only the most significant bits of keys passed to rtree_{read,write}() are

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
/******************************************************************************/

View File

@@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
/******************************************************************************/
/* Data. */
@@ -23,6 +24,17 @@ DWORD tsd_tsd;
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
bool tsd_booted = false;
#else
+/*
+ * This contains a mutex, but it's pretty convenient to allow the mutex code to
+ * have a dependency on tsd. So we define the struct here, and only refer to it
+ * by pointer in the header.
+ */
+struct tsd_init_head_s {
+ql_head(tsd_init_block_t) blocks;
+malloc_mutex_t lock;
+};
pthread_key_t tsd_tsd;
tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks),