Added custom mutex spin.

A fixed maximum spin count is used; benchmark results show that it
resolves contention in almost all cases. Because the benchmark used was
rather intense, the upper bound may be slightly high, but it should
still offer a good tradeoff between spinning and blocking.
Qi Wang 2017-03-15 15:31:37 -07:00 committed by Qi Wang
parent 20b8c70e9f
commit 74f78cafda
3 changed files with 27 additions and 17 deletions
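
In outline, the change gives jemalloc its own bounded spin: the lock slow
path now retries malloc_mutex_trylock() up to MALLOC_MUTEX_MAX_SPIN times,
issuing a CPU pause hint between attempts, and only then falls back to a
blocking acquire. A minimal standalone sketch of the same spin-then-block
pattern, using plain pthreads and hypothetical names (spin_mutex_t,
spin_mutex_lock, MAX_SPIN) rather than jemalloc's actual types:

#include <pthread.h>

#define MAX_SPIN 250	/* Mirrors MALLOC_MUTEX_MAX_SPIN. */

/* Illustrative wrapper; not jemalloc's malloc_mutex_t. */
typedef struct {
	pthread_mutex_t lock;
} spin_mutex_t;

/*
 * Retry trylock a bounded number of times before blocking.  Short
 * critical sections are usually won during the spin phase, so the
 * thread avoids being descheduled; the fixed upper bound caps the
 * cycles wasted when the owner holds the lock for a long time.
 */
static void
spin_mutex_lock(spin_mutex_t *m) {
	for (int cnt = 0; cnt < MAX_SPIN; cnt++) {
		if (pthread_mutex_trylock(&m->lock) == 0) {
			return;	/* Acquired while spinning. */
		}
		/* jemalloc issues CPU_SPINWAIT here (e.g. the x86 "pause"
		 * hint); omitted to keep the sketch portable. */
	}
	pthread_mutex_lock(&m->lock);	/* Give up spinning and block. */
}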

include/jemalloc/internal/mutex_inlines.h

@@ -52,6 +52,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 			malloc_mutex_lock_slow(mutex);
 		}
 		/* We own the lock now.  Update a few counters. */
+		if (config_stats) {
 			mutex_prof_data_t *data = &mutex->prof_data;
 			data->n_lock_ops++;
 			if (data->prev_owner != tsdn) {
@@ -59,6 +60,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 				data->n_owner_switches++;
 			}
+		}
 	}
 	witness_lock(tsdn, &mutex->witness);
 }

include/jemalloc/internal/mutex_types.h

@@ -4,6 +4,12 @@
 typedef struct mutex_prof_data_s mutex_prof_data_t;
 typedef struct malloc_mutex_s malloc_mutex_t;
 
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+#define MALLOC_MUTEX_MAX_SPIN 250
+
 #ifdef _WIN32
 #  if _WIN32_WINNT >= 0x0600
 #    define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
@@ -45,20 +51,10 @@ typedef struct malloc_mutex_s malloc_mutex_t;
 	{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}},	\
 	 WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
 #else
-/* TODO: get rid of adaptive mutex once we do our own spin. */
-#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) &&		\
-      defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
-#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-#    define MALLOC_MUTEX_INITIALIZER					\
-	{{{LOCK_PROF_DATA_INITIALIZER,					\
-	   PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}},			\
-	 WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  else
 #  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
 #  define MALLOC_MUTEX_INITIALIZER					\
 	{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}},	\
 	 WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  endif
 #endif
 
 #endif /* JEMALLOC_INTERNAL_MUTEX_TYPES_H */
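
The deleted branch settles the old TODO ("get rid of adaptive mutex once
we do our own spin"): previously, where glibc offered
PTHREAD_MUTEX_ADAPTIVE_NP, jemalloc used it to get some spinning inside
pthread_mutex_lock(), with a spin policy that is opaque and non-portable.
With the spin now implemented in jemalloc itself, the portable default
mutex type is enough everywhere. For contrast, a sketch of what the
removed branch delegated to glibc (init_adaptive_mutex is an illustrative
name; glibc-only):

#define _GNU_SOURCE	/* Expose PTHREAD_MUTEX_ADAPTIVE_NP on glibc. */
#include <pthread.h>

#ifdef __GLIBC__
/* glibc's adaptive mutex spins briefly inside pthread_mutex_lock()
 * before sleeping; its spin count is internal to glibc and cannot be
 * tuned the way MALLOC_MUTEX_MAX_SPIN can. */
static int
init_adaptive_mutex(pthread_mutex_t *m) {
	pthread_mutexattr_t attr;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	ret = pthread_mutex_init(m, &attr);
	pthread_mutexattr_destroy(&attr);
	return ret;
}
#endif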

src/mutex.c

@@ -69,14 +69,26 @@ void
 malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
 	mutex_prof_data_t *data = &mutex->prof_data;
 
-	{//TODO: a smart spin policy
+	if (ncpus == 1) {
+		goto label_spin_done;
+	}
+
+	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+	do {
+		CPU_SPINWAIT;
 		if (!malloc_mutex_trylock(mutex)) {
 			data->n_spin_acquired++;
 			return;
 		}
-	}
+	} while (cnt++ < max_cnt);
+	if (!config_stats) {
+		/* Only spin is useful when stats is off. */
+		malloc_mutex_lock_final(mutex);
+		return;
+	}
 
 	nstime_t now, before;
+label_spin_done:
 	nstime_init(&now, 0);
 	nstime_update(&now);
 	nstime_copy(&before, &now);
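
When stats are enabled, the code that follows timestamps the blocking
acquire with jemalloc's nstime helpers (nstime_init/nstime_update/
nstime_copy above) so the wait can be folded into the mutex's profiling
counters. The same measure-the-wait pattern, sketched with clock_gettime()
in place of the nstime API (now_ns and timed_lock are illustrative names):

#include <pthread.h>
#include <stdint.h>
#include <time.h>

static uint64_t
now_ns(void) {
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Measure how long a contended blocking acquire takes; a caller can
 * accumulate the result into per-mutex wait-time counters. */
static uint64_t
timed_lock(pthread_mutex_t *m) {
	uint64_t before = now_ns();

	pthread_mutex_lock(m);
	return now_ns() - before;
}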