#ifndef JEMALLOC_INTERNAL_MUTEX_INLINES_H
#define JEMALLOC_INTERNAL_MUTEX_INLINES_H

void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_trylock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex);
void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
/* Block until the underlying lock is acquired. */
JEMALLOC_INLINE void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	MALLOC_MUTEX_LOCK(mutex);
}

/* Trylock: return false if the lock is successfully acquired. */
JEMALLOC_INLINE bool
malloc_mutex_trylock(malloc_mutex_t *mutex) {
	return MALLOC_MUTEX_TRYLOCK(mutex);
}
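
/*
 * Illustrative sketch (not part of the API): because trylock returns false on
 * success, the usual fast-path/slow-path split looks like the snippet below,
 * which is exactly what malloc_mutex_lock() does:
 *
 *	if (malloc_mutex_trylock(mutex)) {
 *		malloc_mutex_lock_slow(mutex);
 *	}
 */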

/* Aggregate lock prof data. */
JEMALLOC_INLINE void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
	}

	sum->n_wait_times += data->n_wait_times;
	sum->n_spin_acquired += data->n_spin_acquired;

	if (sum->max_n_thds < data->max_n_thds) {
		sum->max_n_thds = data->max_n_thds;
	}
	/* n_waiting_thds is updated without the lock held; merge it atomically. */
	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
	    ATOMIC_RELAXED);
	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
	    &data->n_waiting_thds, ATOMIC_RELAXED);
	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
	    ATOMIC_RELAXED);
	sum->n_owner_switches += data->n_owner_switches;
	sum->n_lock_ops += data->n_lock_ops;
}

JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn, &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock(mutex)) {
			malloc_mutex_lock_slow(mutex);
		}
		/* We own the lock now.  Update a few counters. */
		if (config_stats) {
			mutex_prof_data_t *data = &mutex->prof_data;
			data->n_lock_ops++;
			if (data->prev_owner != tsdn) {
				data->prev_owner = tsdn;
				data->n_owner_switches++;
			}
		}
	}
	witness_lock(tsdn, &mutex->witness);
}

JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_unlock(tsdn, &mutex->witness);
	if (isthreaded) {
		MALLOC_MUTEX_UNLOCK(mutex);
	}
}
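
/*
 * Usage sketch (illustrative only; "protected_counter" and "counter_mutex"
 * are hypothetical names): a critical section pairs lock and unlock around
 * the shared state, passing the same tsdn so the witness machinery can track
 * ownership:
 *
 *	malloc_mutex_lock(tsdn, &counter_mutex);
 *	protected_counter++;
 *	malloc_mutex_unlock(tsdn, &counter_mutex);
 */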

JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_owner(tsdn, &mutex->witness);
}

JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn, &mutex->witness);
}

/* Copy the prof data from mutex for processing. */
JEMALLOC_INLINE void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*data = *source;
	/* n_waiting_thds is not reported (modified w/o locking). */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
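
/*
 * Illustrative sketch (hypothetical names: "mutexes", "nmutexes", "sum",
 * "snap"): stats collection can snapshot each mutex under its lock and fold
 * the snapshots into one aggregate with malloc_mutex_prof_merge():
 *
 *	mutex_prof_data_t sum, snap;
 *	memset(&sum, 0, sizeof(sum));
 *	for (unsigned i = 0; i < nmutexes; i++) {
 *		malloc_mutex_lock(tsdn, &mutexes[i]);
 *		malloc_mutex_prof_read(tsdn, &snap, &mutexes[i]);
 *		malloc_mutex_unlock(tsdn, &mutexes[i]);
 *		malloc_mutex_prof_merge(&sum, &snap);
 *	}
 */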

#endif

#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */