Implement malloc_mutex_trylock() w/ proper stats update.

This commit is contained in:
Qi Wang 2017-04-21 15:05:43 -07:00 committed by Qi Wang
parent af76f0e5d2
commit f970c497dc
3 changed files with 36 additions and 14 deletions

View File

@ -10,10 +10,36 @@ malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	MALLOC_MUTEX_LOCK(mutex);
}
/*
 * Raw trylock with no stats or witness bookkeeping.  Returns false when the
 * lock was acquired (see the "Trylock" comment on malloc_mutex_trylock below);
 * the actual return convention comes from the MALLOC_MUTEX_TRYLOCK macro.
 */
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
	return MALLOC_MUTEX_TRYLOCK(mutex);
}
/*
 * Bookkeeping performed after acquiring a mutex: bump the lock-op counter
 * and record owner switches.  Compiles to a no-op when stats are disabled.
 */
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	if (!config_stats) {
		return;
	}
	mutex_prof_data_t *prof = &mutex->prof_data;
	prof->n_lock_ops++;
	if (prof->prev_owner == tsdn) {
		return;
	}
	/* A different thread held the lock last time; count the switch. */
	prof->prev_owner = tsdn;
	prof->n_owner_switches++;
}
/* Trylock: return false if the lock is successfully acquired. */ /* Trylock: return false if the lock is successfully acquired. */
static inline bool static inline bool
malloc_mutex_trylock(malloc_mutex_t *mutex) { malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
return MALLOC_MUTEX_TRYLOCK(mutex); witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) {
if (malloc_mutex_trylock_final(mutex)) {
return true;
}
mutex_owner_stats_update(tsdn, mutex);
}
witness_lock(tsdn, &mutex->witness);
return false;
} }
/* Aggregate lock prof data. */
@ -44,18 +70,10 @@ static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn, &mutex->witness); witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) { if (isthreaded) {
if (malloc_mutex_trylock(mutex)) { if (malloc_mutex_trylock_final(mutex)) {
malloc_mutex_lock_slow(mutex); malloc_mutex_lock_slow(mutex);
} }
/* We own the lock now. Update a few counters. */ mutex_owner_stats_update(tsdn, mutex);
if (config_stats) {
mutex_prof_data_t *data = &mutex->prof_data;
data->n_lock_ops++;
if (data->prev_owner != tsdn) {
data->prev_owner = tsdn;
data->n_owner_switches++;
}
}
} }
witness_lock(tsdn, &mutex->witness); witness_lock(tsdn, &mutex->witness);
} }

View File

@ -292,10 +292,13 @@ malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_lock_final
malloc_mutex_lock_slow
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_trylock
malloc_mutex_trylock_final
malloc_mutex_unlock
malloc_printf
malloc_slow
@ -309,6 +312,7 @@ malloc_tsd_malloc
malloc_vcprintf
malloc_vsnprintf
malloc_write
mutex_owner_stats_update
narenas_auto
narenas_total_get
ncpus

View File

@ -81,7 +81,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		CPU_SPINWAIT;
		if (!malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
@ -100,7 +100,7 @@ label_spin_done:
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try as above two calls may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}