2017-01-11 10:06:31 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_MUTEX_INLINES_H
|
|
|
|
#define JEMALLOC_INTERNAL_MUTEX_INLINES_H
|
|
|
|
|
2017-04-18 07:17:02 +08:00
|
|
|
#include "jemalloc/internal/nstime.h"
|
|
|
|
|
2017-02-24 06:18:07 +08:00
|
|
|
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	/*
	 * Unconditionally acquire the underlying lock primitive (blocking).
	 * No witness or stats bookkeeping happens here; callers are expected
	 * to handle that themselves.
	 */
	MALLOC_MUTEX_LOCK(mutex);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
	/*
	 * Returns true on FAILURE to acquire the lock (i.e. the lock was
	 * already held), matching this file's trylock convention.
	 */
	return MALLOC_MUTEX_TRYLOCK(mutex);
}
|
|
|
|
|
2017-04-22 06:05:43 +08:00
|
|
|
static inline void
|
|
|
|
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
|
|
|
if (config_stats) {
|
|
|
|
mutex_prof_data_t *data = &mutex->prof_data;
|
|
|
|
data->n_lock_ops++;
|
|
|
|
if (data->prev_owner != tsdn) {
|
|
|
|
data->prev_owner = tsdn;
|
|
|
|
data->n_owner_switches++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Trylock: return false if the lock is successfully acquired. */
|
|
|
|
static inline bool
|
|
|
|
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
|
|
|
witness_assert_not_owner(tsdn, &mutex->witness);
|
|
|
|
if (isthreaded) {
|
|
|
|
if (malloc_mutex_trylock_final(mutex)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
mutex_owner_stats_update(tsdn, mutex);
|
|
|
|
}
|
|
|
|
witness_lock(tsdn, &mutex->witness);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-03-04 11:58:43 +08:00
|
|
|
/* Aggregate lock prof data. */
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-03-14 08:29:03 +08:00
|
|
|
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
2017-03-18 08:42:10 +08:00
|
|
|
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
|
|
|
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
|
|
|
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
|
2017-03-04 11:58:43 +08:00
|
|
|
}
|
2017-03-18 08:42:10 +08:00
|
|
|
|
2017-03-04 11:58:43 +08:00
|
|
|
sum->n_wait_times += data->n_wait_times;
|
|
|
|
sum->n_spin_acquired += data->n_spin_acquired;
|
|
|
|
|
|
|
|
if (sum->max_n_thds < data->max_n_thds) {
|
|
|
|
sum->max_n_thds = data->max_n_thds;
|
|
|
|
}
|
2017-04-05 09:34:01 +08:00
|
|
|
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
|
|
|
|
ATOMIC_RELAXED);
|
|
|
|
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
|
|
|
|
&data->n_waiting_thds, ATOMIC_RELAXED);
|
|
|
|
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
|
|
|
|
ATOMIC_RELAXED);
|
2017-03-04 11:58:43 +08:00
|
|
|
sum->n_owner_switches += data->n_owner_switches;
|
|
|
|
sum->n_lock_ops += data->n_lock_ops;
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
2017-02-10 01:06:22 +08:00
|
|
|
witness_assert_not_owner(tsdn, &mutex->witness);
|
2017-01-11 10:06:31 +08:00
|
|
|
if (isthreaded) {
|
2017-04-22 06:05:43 +08:00
|
|
|
if (malloc_mutex_trylock_final(mutex)) {
|
2017-02-24 06:18:07 +08:00
|
|
|
malloc_mutex_lock_slow(mutex);
|
|
|
|
}
|
2017-04-22 06:05:43 +08:00
|
|
|
mutex_owner_stats_update(tsdn, mutex);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
2017-02-10 01:06:22 +08:00
|
|
|
witness_lock(tsdn, &mutex->witness);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
2017-02-10 01:06:22 +08:00
|
|
|
witness_unlock(tsdn, &mutex->witness);
|
2017-01-11 10:06:31 +08:00
|
|
|
if (isthreaded) {
|
2017-02-24 06:18:07 +08:00
|
|
|
MALLOC_MUTEX_UNLOCK(mutex);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
2017-02-10 01:06:22 +08:00
|
|
|
witness_assert_owner(tsdn, &mutex->witness);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
2017-02-10 01:06:22 +08:00
|
|
|
witness_assert_not_owner(tsdn, &mutex->witness);
|
2017-01-11 10:06:31 +08:00
|
|
|
}
|
2017-03-04 11:58:43 +08:00
|
|
|
|
|
|
|
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*data = *source;
	/* n_wait_thds is not reported (modified w/o locking). */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */
|