Added "stats.mutexes.reset" mallctl to reset all mutex stats.
Also switched from the term "lock" to "mutex".
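For reference, a minimal sketch of how the new control might be invoked from application code through the standard mallctl() interface. The mallctl name comes from this commit; the helper function, error handling, and the assumption that the build has stats enabled are illustrative only, not part of the change:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Trigger "stats.mutexes.reset" to clear all mutex profiling counters.
 * Sketch only: treated here as a void-style write control, so no old/new
 * buffers are passed.
 */
static void
reset_mutex_stats(void) {
	int err = mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
	if (err != 0) {
		fprintf(stderr, "stats.mutexes.reset failed: %d\n", err);
	}
}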
@@ -4,13 +4,13 @@
 /* Maximum ctl tree depth. */
 #define CTL_MAX_DEPTH 7
 
-#define NUM_GLOBAL_PROF_LOCKS 3
-#define NUM_ARENA_PROF_LOCKS 6
-#define NUM_LOCK_PROF_COUNTERS 7
+#define NUM_GLOBAL_PROF_MUTEXES 3
+#define NUM_ARENA_PROF_MUTEXES 6
+#define NUM_MUTEX_PROF_COUNTERS 7
 
-extern const char *arena_lock_names[NUM_ARENA_PROF_LOCKS];
-extern const char *global_lock_names[NUM_GLOBAL_PROF_LOCKS];
-extern const char *lock_counter_names[NUM_LOCK_PROF_COUNTERS];
+extern const char *arena_mutex_names[NUM_ARENA_PROF_MUTEXES];
+extern const char *global_mutex_names[NUM_GLOBAL_PROF_MUTEXES];
+extern const char *mutex_counter_names[NUM_MUTEX_PROF_COUNTERS];
 
 int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
@@ -42,8 +42,8 @@ struct ctl_stats_s {
 	size_t mapped;
 	size_t retained;
 
-#define MTX(mutex) lock_prof_data_t mutex##_mtx_data;
-GLOBAL_PROF_MUTEXES
+#define MTX(mutex) mutex_prof_data_t mutex##_mtx_data;
+GLOBAL_PROF_MUTEXES
 #undef MTX
 };
 
@@ -14,5 +14,6 @@ void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
 bool malloc_mutex_boot(void);
+void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
 
 #endif /* JEMALLOC_INTERNAL_MUTEX_EXTERNS_H */
@@ -9,9 +9,9 @@ bool malloc_mutex_trylock(malloc_mutex_t *mutex);
 void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_lock_prof_read(tsdn_t *tsdn, lock_prof_data_t *data,
+void malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
     malloc_mutex_t *mutex);
-void malloc_lock_prof_merge(lock_prof_data_t *sum, lock_prof_data_t *data);
+void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
@@ -28,7 +28,7 @@ malloc_mutex_trylock(malloc_mutex_t *mutex) {
 
 /* Aggregate lock prof data. */
 JEMALLOC_INLINE void
-malloc_lock_prof_merge(lock_prof_data_t *sum, lock_prof_data_t *data) {
+malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
 	sum->tot_wait_time += data->tot_wait_time;
 	if (data->max_wait_time > sum->max_wait_time) {
 		sum->max_wait_time = data->max_wait_time;
@@ -52,7 +52,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 		malloc_mutex_lock_slow(mutex);
 	}
 	/* We own the lock now. Update a few counters. */
-	lock_prof_data_t *data = &mutex->prof_data;
+	mutex_prof_data_t *data = &mutex->prof_data;
 	data->n_lock_ops++;
 	if (data->prev_owner != tsdn) {
 		data->prev_owner = tsdn;
@@ -82,10 +82,10 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 
 /* Copy the prof data from mutex for processing. */
 JEMALLOC_INLINE void
-malloc_lock_prof_read(tsdn_t *tsdn, lock_prof_data_t *data,
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
     malloc_mutex_t *mutex) {
-	lock_prof_data_t *source = &mutex->prof_data;
-	/* Can only read with the lock. */
+	mutex_prof_data_t *source = &mutex->prof_data;
+	/* Can only read holding the mutex. */
 	malloc_mutex_assert_owner(tsdn, mutex);
 
 	*data = *source;
@@ -1,20 +1,20 @@
 #ifndef JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
 #define JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
 
-struct lock_prof_data_s {
+struct mutex_prof_data_s {
 	/*
 	 * Counters touched on the slow path, i.e. when there is lock
 	 * contention. We update them once we have the lock.
 	 */
-	/* Total time (in nano seconds) spent waiting on this lock. */
+	/* Total time (in nano seconds) spent waiting on this mutex. */
 	uint64_t tot_wait_time;
 	/* Max time (in nano seconds) spent on a single lock operation. */
 	uint64_t max_wait_time;
-	/* # of times have to wait for this lock (after spinning). */
+	/* # of times have to wait for this mutex (after spinning). */
 	uint64_t n_wait_times;
-	/* # of times acquired the lock through local spinning. */
+	/* # of times acquired the mutex through local spinning. */
 	uint64_t n_spin_acquired;
-	/* Max # of threads waiting for the lock at the same time. */
+	/* Max # of threads waiting for the mutex at the same time. */
 	uint32_t max_n_thds;
 	/* Current # of threads waiting on the lock. Atomic synced. */
 	uint32_t n_waiting_thds;
@@ -25,9 +25,9 @@ struct lock_prof_data_s {
 	 * the lock) so that we have a higher chance of them being on the same
 	 * cacheline.
	 */
-	/* # of times the new lock holder is different from the previous one. */
+	/* # of times the mutex holder is different than the previous one. */
 	uint64_t n_owner_switches;
-	/* Previous lock holder, to facilitate n_owner_switches. */
+	/* Previous mutex holder, to facilitate n_owner_switches. */
 	tsdn_t *prev_owner;
 	/* # of lock() operations in total. */
 	uint64_t n_lock_ops;
@@ -38,13 +38,13 @@ struct malloc_mutex_s {
 		struct {
 			/*
 			 * prof_data is defined first to reduce cacheline
-			 * bouncing: the data is not touched by the lock holder
+			 * bouncing: the data is not touched by the mutex holder
 			 * during unlocking, while might be modified by
-			 * contenders. Having it before the lock itself could
+			 * contenders. Having it before the mutex itself could
 			 * avoid prefetching a modified cacheline (for the
 			 * unlocking thread).
 			 */
-			lock_prof_data_t prof_data;
+			mutex_prof_data_t prof_data;
 #ifdef _WIN32
 #  if _WIN32_WINNT >= 0x0600
 			SRWLOCK lock;
@@ -1,7 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_MUTEX_TYPES_H
 #define JEMALLOC_INTERNAL_MUTEX_TYPES_H
 
-typedef struct lock_prof_data_s lock_prof_data_t;
+typedef struct mutex_prof_data_s mutex_prof_data_t;
 typedef struct malloc_mutex_s malloc_mutex_t;
 
 #ifdef _WIN32
@@ -269,6 +269,7 @@ lg_floor
 lg_prof_sample
 malloc_cprintf
 malloc_getcpu
+malloc_mutex_prof_data_reset
 malloc_mutex_assert_not_owner
 malloc_mutex_assert_owner
 malloc_mutex_boot
@@ -57,7 +57,7 @@ struct malloc_bin_stats_s {
 	/* Current number of slabs in this bin. */
 	size_t curslabs;
 
-	lock_prof_data_t lock_data;
+	mutex_prof_data_t mutex_data;
 };
 
 struct malloc_large_stats_s {
@@ -124,12 +124,12 @@ struct arena_stats_s {
 	/* Number of bytes cached in tcache associated with this arena. */
 	atomic_zu_t tcache_bytes; /* Derived. */
 
-	lock_prof_data_t large_mtx_data;
-	lock_prof_data_t extent_freelist_mtx_data;
-	lock_prof_data_t extents_cached_mtx_data;
-	lock_prof_data_t extents_retained_mtx_data;
-	lock_prof_data_t decay_mtx_data;
-	lock_prof_data_t tcache_mtx_data;
+	mutex_prof_data_t large_mtx_data;
+	mutex_prof_data_t extent_freelist_mtx_data;
+	mutex_prof_data_t extents_cached_mtx_data;
+	mutex_prof_data_t extents_retained_mtx_data;
+	mutex_prof_data_t decay_mtx_data;
+	mutex_prof_data_t tcache_mtx_data;
 
 	/* One element for each large size class. */
 	malloc_large_stats_t lstats[NSIZES - NBINS];