Add max_per_bg_thd stats for per background thread mutexes.

Added a new stats row that aggregates the maximum value of the mutex
counters across all background threads.  Given that the per background
thread mutex is not expected to be contended, this counter is mainly for
sanity checking / debugging.
zhxchen17 2019-08-14 16:10:09 -07:00 committed by Qi Wang
parent 4b76c684bb
commit b7c7df24ba
5 changed files with 55 additions and 11 deletions
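Because max_per_bg_thd is added to MUTEX_PROF_GLOBAL_MUTEXES (see the mutex_prof hunk below), the new row should be readable like any other global mutex counter. The following is a minimal usage sketch, not part of this commit; the exact stat name stats.mutexes.max_per_bg_thd.num_ops is an assumption based on jemalloc's usual stats.mutexes.<mutex>.<counter> naming.

/*
 * Hypothetical sketch: read the new aggregated per-background-thread mutex
 * counter through mallctl.  The stat name below is assumed, not taken from
 * this commit.
 */
#include <stdbool.h>
#include <inttypes.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	bool enable = true;
	uint64_t epoch = 1;
	uint64_t max_ops = 0;
	size_t sz;

	/* Background threads are off by default; turn them on first. */
	mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));

	/* Refresh jemalloc's cached stats snapshot. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	/* Largest n_lock_ops observed on any single background thread mutex. */
	sz = sizeof(max_ops);
	if (mallctl("stats.mutexes.max_per_bg_thd.num_ops", &max_ops, &sz,
	    NULL, 0) == 0) {
		printf("max per-bg-thread mutex lock ops: %" PRIu64 "\n",
		    max_ops);
	}
	return 0;
}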


@@ -48,6 +48,7 @@ struct background_thread_stats_s {
 	size_t num_threads;
 	uint64_t num_runs;
 	nstime_t run_interval;
+	mutex_prof_data_t max_counter_per_bg_thd;
 };
 typedef struct background_thread_stats_s background_thread_stats_t;
 


@@ -245,22 +245,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
 }
 
-/* Copy the prof data from mutex for processing. */
 static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
-    malloc_mutex_t *mutex) {
-	mutex_prof_data_t *source = &mutex->prof_data;
-	/* Can only read holding the mutex. */
-	malloc_mutex_assert_owner(tsdn, mutex);
-
+malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
 	/*
 	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
 	 * atomic data), but the mutex protection makes this safe, and writing
 	 * a member-for-member copy is tedious for this situation.
 	 */
-	*data = *source;
+	*dst = *source;
 	/* n_wait_thds is not reported (modified w/o locking). */
-	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+	atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+	/* Can only read holding the mutex. */
+	malloc_mutex_assert_owner(tsdn, mutex);
+	malloc_mutex_prof_copy(data, &mutex->prof_data);
 }
 
 static inline void
@@ -285,4 +288,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
 	data->n_lock_ops += source->n_lock_ops;
 }
 
+/* Compare the prof data and update to the maximum. */
+static inline void
+malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+	mutex_prof_data_t *source = &mutex->prof_data;
+	/* Can only read holding the mutex. */
+	malloc_mutex_assert_owner(tsdn, mutex);
+
+	if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
+		nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
+	}
+	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+		nstime_copy(&data->max_wait_time, &source->max_wait_time);
+	}
+	if (source->n_wait_times > data->n_wait_times) {
+		data->n_wait_times = source->n_wait_times;
+	}
+	if (source->n_spin_acquired > data->n_spin_acquired) {
+		data->n_spin_acquired = source->n_spin_acquired;
+	}
+	if (source->max_n_thds > data->max_n_thds) {
+		data->max_n_thds = source->max_n_thds;
+	}
+	if (source->n_owner_switches > data->n_owner_switches) {
+		data->n_owner_switches = source->n_owner_switches;
+	}
+	if (source->n_lock_ops > data->n_lock_ops) {
+		data->n_lock_ops = source->n_lock_ops;
+	}
+	/* n_wait_thds is not reported. */
+}
+
 #endif /* JEMALLOC_INTERNAL_MUTEX_H */
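To make the aggregation semantics concrete: malloc_mutex_prof_max_update takes a field-by-field maximum, so the reported row can mix counters taken from different background threads rather than describing a single worst thread. A self-contained toy sketch with a hypothetical two-field struct (toy_prof_data_t is illustrative only, not jemalloc code):

/*
 * Toy illustration of the field-wise maximum: with thread A at
 * {n_lock_ops = 10, max_wait_ns = 5} and thread B at {n_lock_ops = 3,
 * max_wait_ns = 9}, the aggregate reports {10, 9}; each counter can come
 * from a different thread.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t n_lock_ops;
	uint64_t max_wait_ns;
} toy_prof_data_t;

static void
toy_prof_max_update(toy_prof_data_t *data, const toy_prof_data_t *source) {
	if (source->n_lock_ops > data->n_lock_ops) {
		data->n_lock_ops = source->n_lock_ops;
	}
	if (source->max_wait_ns > data->max_wait_ns) {
		data->max_wait_ns = source->max_wait_ns;
	}
}

int
main(void) {
	toy_prof_data_t agg = {0, 0};
	toy_prof_data_t thd_a = {10, 5}, thd_b = {3, 9};

	toy_prof_max_update(&agg, &thd_a);
	toy_prof_max_update(&agg, &thd_b);
	/* Prints "10 9". */
	printf("%llu %llu\n", (unsigned long long)agg.n_lock_ops,
	    (unsigned long long)agg.max_wait_ns);
	return 0;
}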


@@ -7,6 +7,7 @@
 
 #define MUTEX_PROF_GLOBAL_MUTEXES \
     OP(background_thread) \
+    OP(max_per_bg_thd) \
     OP(ctl) \
     OP(prof) \
     OP(prof_thds_data) \
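For reference, MUTEX_PROF_GLOBAL_MUTEXES is an x-macro list, so this one-line OP(max_per_bg_thd) addition is enough to mint the global_prof_mutex_max_per_bg_thd index used in the ctl.c hunk below. Roughly how the expansion works (paraphrased sketch; the exact wrapper names here are assumptions):

/*
 * Rough paraphrase of how the OP() list is expanded; wrapper names are
 * assumptions.  Each entry becomes an enum index, so OP(max_per_bg_thd)
 * yields global_prof_mutex_max_per_bg_thd, the slot written by
 * ctl_background_thread_stats_read() below.
 */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;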


@@ -794,9 +794,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
 		return true;
 	}
 
-	stats->num_threads = n_background_threads;
-	uint64_t num_runs = 0;
 	nstime_init(&stats->run_interval, 0);
+	memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
+
+	uint64_t num_runs = 0;
+	stats->num_threads = n_background_threads;
 	for (unsigned i = 0; i < max_background_threads; i++) {
 		background_thread_info_t *info = &background_thread_info[i];
 		if (malloc_mutex_trylock(tsdn, &info->mtx)) {
@@ -809,6 +811,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
 		if (info->state != background_thread_stopped) {
 			num_runs += info->tot_n_runs;
 			nstime_add(&stats->run_interval, &info->tot_sleep_time);
+			malloc_mutex_prof_max_update(tsdn,
+			    &stats->max_counter_per_bg_thd, &info->mtx);
 		}
 		malloc_mutex_unlock(tsdn, &info->mtx);
 	}


@@ -1042,6 +1042,9 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
 		memset(stats, 0, sizeof(background_thread_stats_t));
 		nstime_init(&stats->run_interval, 0);
 	}
+	malloc_mutex_prof_copy(
+	    &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
+	    &stats->max_counter_per_bg_thd);
 }
 
 static void