2017-01-20 13:41:41 +08:00
|
|
|
#define JEMALLOC_MUTEX_C_
|
2017-04-11 09:17:55 +08:00
|
|
|
#include "jemalloc/internal/jemalloc_preamble.h"
|
|
|
|
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-04-12 05:43:12 +08:00
|
|
|
#include "jemalloc/internal/assert.h"
|
2017-04-12 04:06:31 +08:00
|
|
|
#include "jemalloc/internal/malloc_io.h"
|
|
|
|
|
2012-04-22 12:27:46 +08:00
|
|
|
#ifndef _CRT_SPINCOUNT
|
2017-01-20 13:41:41 +08:00
|
|
|
#define _CRT_SPINCOUNT 4000
|
2012-04-22 12:27:46 +08:00
|
|
|
#endif
|
|
|
|
|
2010-01-17 01:53:50 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
/* Data. */
|
|
|
|
|
|
|
|
#ifdef JEMALLOC_LAZY_LOCK
|
|
|
|
bool isthreaded = false;
|
|
|
|
#endif
|
2012-04-03 23:47:07 +08:00
|
|
|
#ifdef JEMALLOC_MUTEX_INIT_CB
|
|
|
|
static bool postpone_init = true;
|
|
|
|
static malloc_mutex_t *postponed_mutexes = NULL;
|
|
|
|
#endif
|
2010-01-17 01:53:50 +08:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
/*
|
|
|
|
* We intercept pthread_create() calls in order to toggle isthreaded if the
|
|
|
|
* process goes multi-threaded.
|
|
|
|
*/
|
|
|
|
|
2012-04-22 12:27:46 +08:00
|
|
|
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
|
2010-01-17 01:53:50 +08:00
|
|
|
static void
|
2017-01-16 08:56:30 +08:00
|
|
|
pthread_create_once(void) {
|
2017-03-18 03:42:33 +08:00
|
|
|
pthread_create_fptr = load_pthread_create_fptr();
|
2010-01-17 01:53:50 +08:00
|
|
|
isthreaded = true;
|
|
|
|
}
|
|
|
|
|
2012-04-30 18:38:29 +08:00
|
|
|
JEMALLOC_EXPORT int
|
2010-01-17 01:53:50 +08:00
|
|
|
pthread_create(pthread_t *__restrict thread,
|
|
|
|
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
|
2017-01-16 08:56:30 +08:00
|
|
|
void *__restrict arg) {
|
2010-01-17 01:53:50 +08:00
|
|
|
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
|
|
|
|
|
|
|
|
pthread_once(&once_control, pthread_create_once);
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return pthread_create_fptr(thread, attr, start_routine, arg);
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2012-02-03 14:04:57 +08:00
|
|
|
#ifdef JEMALLOC_MUTEX_INIT_CB
|
2012-09-18 20:40:31 +08:00
|
|
|
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
|
2012-02-03 14:04:57 +08:00
|
|
|
void *(calloc_cb)(size_t, size_t));
|
|
|
|
#endif
|
|
|
|
|
2017-02-24 06:18:07 +08:00
|
|
|
/*
 * Slow path taken when the fast-path trylock failed.  Strategy:
 *   1. Spin (bounded) hoping the owner releases soon — skipped on a
 *      uniprocessor, where spinning can never help.
 *   2. If stats are disabled, just block; there is nothing to measure.
 *   3. Otherwise time the blocking wait and update the mutex's profiling
 *      counters (wait time, max wait, spin acquisitions, waiter counts).
 */
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	/* Only read when config_stats; UNUSED silences the other build. */
	UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		/* Spinning is pointless with a single CPU. */
		goto label_spin_done;
	}

	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		CPU_SPINWAIT;
		if (!malloc_mutex_trylock_final(mutex)) {
			/* Acquired while spinning; cheap-path counter. */
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Only spin is useful when stats is off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Copy before to after to avoid clock skews. */
	nstime_t after;
	nstime_copy(&after, &before);
	/* Register as a waiter; fetch_add returns the pre-increment value. */
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try as above two calls may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	/* delta = after - before: how long we blocked. */
	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	/* Track the high-water mark of simultaneous waiters. */
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}
|
|
|
|
|
2017-03-11 04:14:05 +08:00
|
|
|
/*
 * Reset all profiling counters for a mutex to their initial state.
 * prev_owner is assigned NULL explicitly after the memset because an
 * all-zero-bytes pattern is not guaranteed to be a null pointer.
 */
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	data->prev_owner = NULL;
}
|
|
|
|
|
2017-03-14 08:29:03 +08:00
|
|
|
/*
 * Public wrapper for resetting a mutex's profiling data.  The caller must
 * hold the mutex (asserted in debug builds) so the counters are not
 * concurrently mutated while being cleared.
 */
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}
|
|
|
|
|
2017-05-16 06:38:15 +08:00
|
|
|
static int
|
|
|
|
mutex_addr_comp(const witness_t *witness1, void *mutex1,
|
|
|
|
const witness_t *witness2, void *mutex2) {
|
|
|
|
assert(mutex1 != NULL);
|
|
|
|
assert(mutex2 != NULL);
|
|
|
|
uintptr_t mu1int = (uintptr_t)mutex1;
|
|
|
|
uintptr_t mu2int = (uintptr_t)mutex2;
|
|
|
|
if (mu1int < mu2int) {
|
|
|
|
return -1;
|
|
|
|
} else if (mu1int == mu2int) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-17 01:53:50 +08:00
|
|
|
bool
|
2017-01-16 08:56:30 +08:00
|
|
|
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
|
2017-05-16 06:38:15 +08:00
|
|
|
witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex_prof_data_init(&mutex->prof_data);
|
2012-04-22 12:27:46 +08:00
|
|
|
#ifdef _WIN32
|
2015-06-26 04:53:58 +08:00
|
|
|
# if _WIN32_WINNT >= 0x0600
|
|
|
|
InitializeSRWLock(&mutex->lock);
|
|
|
|
# else
|
2012-04-22 12:27:46 +08:00
|
|
|
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
|
2017-01-16 08:56:30 +08:00
|
|
|
_CRT_SPINCOUNT)) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2015-06-26 04:53:58 +08:00
|
|
|
# endif
|
2016-11-03 09:09:45 +08:00
|
|
|
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
|
|
|
mutex->lock = OS_UNFAIR_LOCK_INIT;
|
2012-04-22 12:27:46 +08:00
|
|
|
#elif (defined(JEMALLOC_OSSPIN))
|
2012-04-03 23:47:07 +08:00
|
|
|
mutex->lock = 0;
|
2012-02-03 14:04:57 +08:00
|
|
|
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
2012-04-03 23:47:07 +08:00
|
|
|
if (postpone_init) {
|
|
|
|
mutex->postponed_next = postponed_mutexes;
|
|
|
|
postponed_mutexes = mutex;
|
|
|
|
} else {
|
2015-01-31 13:49:19 +08:00
|
|
|
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
|
2017-01-16 08:56:30 +08:00
|
|
|
bootstrap_calloc) != 0) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-04-03 23:47:07 +08:00
|
|
|
}
|
2011-03-19 10:30:18 +08:00
|
|
|
#else
|
2010-01-17 01:53:50 +08:00
|
|
|
pthread_mutexattr_t attr;
|
|
|
|
|
2017-01-16 08:56:30 +08:00
|
|
|
if (pthread_mutexattr_init(&attr) != 0) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-02-03 14:04:57 +08:00
|
|
|
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
|
2012-04-03 23:47:07 +08:00
|
|
|
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
|
2010-01-17 01:53:50 +08:00
|
|
|
pthread_mutexattr_destroy(&attr);
|
2017-01-20 10:15:45 +08:00
|
|
|
return true;
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
pthread_mutexattr_destroy(&attr);
|
2011-03-19 10:30:18 +08:00
|
|
|
#endif
|
2017-01-16 08:56:30 +08:00
|
|
|
if (config_debug) {
|
2017-05-16 06:38:15 +08:00
|
|
|
mutex->lock_order = lock_order;
|
|
|
|
if (lock_order == malloc_mutex_address_ordered) {
|
|
|
|
witness_init(&mutex->witness, name, rank,
|
|
|
|
mutex_addr_comp, &mutex);
|
|
|
|
} else {
|
|
|
|
witness_init(&mutex->witness, name, rank, NULL, NULL);
|
|
|
|
}
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2017-01-20 10:15:45 +08:00
|
|
|
return false;
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
2010-10-03 06:18:50 +08:00
|
|
|
|
2012-03-14 07:31:41 +08:00
|
|
|
/*
 * Acquire the mutex before fork() so that no other thread holds it at the
 * moment of the fork, leaving it in a consistent state in the child.
 */
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}
|
|
|
|
|
|
|
|
/* Release the mutex acquired in malloc_mutex_prefork() (parent side). */
void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}
|
|
|
|
|
|
|
|
/*
 * Child-side counterpart of malloc_mutex_prefork().  On MUTEX_INIT_CB
 * platforms the lock survives fork and can simply be released; elsewhere
 * the mutex is re-initialized from scratch (its pre-fork pthread state may
 * be unusable in the child).  Re-init failure is reported and, under
 * opt_abort, fatal — there is no way to return an error from here.
 */
void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}
|
2012-04-03 23:47:07 +08:00
|
|
|
|
|
|
|
/*
 * Finish bootstrap-time mutex setup.  On MUTEX_INIT_CB platforms, disable
 * further postponement and initialize every mutex queued on the
 * postponed_mutexes list by earlier malloc_mutex_init() calls.  Returns
 * true on error, false on success; a no-op elsewhere.
 */
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}
|