Switch to nstime_t for the time-related fields in mutex profiling.

This commit is contained in:
Qi Wang 2017-03-17 17:42:10 -07:00 committed by Qi Wang
parent 74f78cafda
commit f6698ec1e6
5 changed files with 25 additions and 21 deletions

View File

@ -29,10 +29,11 @@ malloc_mutex_trylock(malloc_mutex_t *mutex) {
/* Aggregate lock prof data. */ /* Aggregate lock prof data. */
JEMALLOC_INLINE void JEMALLOC_INLINE void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
sum->tot_wait_time += data->tot_wait_time; nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
if (data->max_wait_time > sum->max_wait_time) { if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
sum->max_wait_time = data->max_wait_time; nstime_copy(&sum->max_wait_time, &data->max_wait_time);
} }
sum->n_wait_times += data->n_wait_times; sum->n_wait_times += data->n_wait_times;
sum->n_spin_acquired += data->n_spin_acquired; sum->n_spin_acquired += data->n_spin_acquired;

View File

@ -7,9 +7,9 @@ struct mutex_prof_data_s {
* contention. We update them once we have the lock. * contention. We update them once we have the lock.
*/ */
/* Total time (in nanoseconds) spent waiting on this mutex. */ /* Total time (in nanoseconds) spent waiting on this mutex. */
uint64_t tot_wait_time; nstime_t tot_wait_time;
/* Max time (in nanoseconds) spent on a single lock operation. */ /* Max time (in nanoseconds) spent on a single lock operation. */
uint64_t max_wait_time; nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */ /* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times; uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */ /* # of times acquired the mutex through local spinning. */

View File

@ -34,7 +34,8 @@ typedef struct malloc_mutex_s malloc_mutex_t;
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif #endif
#define LOCK_PROF_DATA_INITIALIZER {0, 0, 0, 0, 0, 0, 0, NULL, 0} #define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, 0, 0, NULL, 0}
#ifdef _WIN32 #ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER # define MALLOC_MUTEX_INITIALIZER

View File

@ -2485,9 +2485,9 @@ CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
l.n_owner_switches, uint64_t) \ l.n_owner_switches, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
l.tot_wait_time, uint64_t) \ nstime_ns(&l.tot_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
l.max_wait_time, uint64_t) \ nstime_ns(&l.max_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
l.max_n_thds, uint64_t) l.max_n_thds, uint64_t)

View File

@ -68,6 +68,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) { malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data; mutex_prof_data_t *data = &mutex->prof_data;
UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;
if (ncpus == 1) { if (ncpus == 1) {
goto label_spin_done; goto label_spin_done;
@ -87,12 +88,11 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
malloc_mutex_lock_final(mutex); malloc_mutex_lock_final(mutex);
return; return;
} }
nstime_t now, before;
label_spin_done: label_spin_done:
nstime_init(&now, 0); nstime_update(&before);
nstime_update(&now); /* Copy before to after to avoid clock skews. */
nstime_copy(&before, &now); nstime_t after;
nstime_copy(&after, &before);
uint32_t n_thds = atomic_add_u32(&data->n_waiting_thds, 1); uint32_t n_thds = atomic_add_u32(&data->n_waiting_thds, 1);
/* One last try as above two calls may take quite some cycles. */ /* One last try as above two calls may take quite some cycles. */
if (!malloc_mutex_trylock(mutex)) { if (!malloc_mutex_trylock(mutex)) {
@ -103,16 +103,18 @@ label_spin_done:
/* True slow path. */ /* True slow path. */
malloc_mutex_lock_final(mutex); malloc_mutex_lock_final(mutex);
atomic_sub_u32(&data->n_waiting_thds, 1);
nstime_update(&now);
/* Update more slow-path only counters. */ /* Update more slow-path only counters. */
nstime_subtract(&now, &before); atomic_sub_u32(&data->n_waiting_thds, 1);
uint64_t wait_time = nstime_ns(&now); nstime_update(&after);
nstime_t delta;
nstime_copy(&delta, &after);
nstime_subtract(&delta, &before);
data->n_wait_times++; data->n_wait_times++;
data->tot_wait_time += wait_time; nstime_add(&data->tot_wait_time, &delta);
if (wait_time > data->max_wait_time) { if (nstime_compare(&data->max_wait_time, &delta) < 0) {
data->max_wait_time = wait_time; nstime_copy(&data->max_wait_time, &delta);
} }
if (n_thds > data->max_n_thds) { if (n_thds > data->max_n_thds) {
data->max_n_thds = n_thds; data->max_n_thds = n_thds;