#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/thread_event.h"

/*
 * This file implements the profiling "APIs" needed by other parts of jemalloc,
 * and also manages the relevant "operational" data, mainly options and mutexes;
 * the core profiling data structures are encapsulated in prof_data.c.
 */

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
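
/*
 * These options correspond to the "opt.prof*" mallctls and are normally set at
 * startup, e.g. via the MALLOC_CONF environment variable (an illustrative
 * setting would be MALLOC_CONF="prof:true,lg_prof_sample:19").
 */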

/* Accessed via prof_idump_[accum/rollback](). */
static prof_accum_t prof_idump_accumulated;

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/* Non static to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_filename_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

malloc_mutex_t prof_dump_mtx;
static char *prof_dump_prefix = NULL;

/* Do not dump any profiles until bootstrapping is complete. */
bool prof_booted = false;

/******************************************************************************/

/*
 * If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up
 * calling strncpy with a size of 0, which triggers a -Wstringop-truncation
 * warning (strncpy can never actually be called in this case, since we bail
 * out much earlier when config_prof is false).  This function works around
 * the warning to let us leave the warning on.
 */
static inline void
prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
	cassert(config_prof);
#ifdef JEMALLOC_PROF
	strncpy(dest, src, size);
#endif
}
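
/*
 * A tctx can be destroyed only when it retains no state that still matters:
 * cumulative counts are not being kept (!opt_prof_accum), it has no live
 * sampled objects, and no allocation is currently in flight (prepared).
 */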
static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	if (opt_prof_accum) {
		return false;
	}
	if (tctx->cnts.curobjs != 0) {
		return false;
	}
	if (tctx->prepared) {
		return false;
	}
	return true;
}

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
	cassert(config_prof);

	if (tsd_reentrancy_level_get(tsd) > 0) {
		assert((uintptr_t)tctx == (uintptr_t)1U);
		return;
	}

	prof_tdata_t *tdata;

	if (updated) {
		/*
		 * Compute a new sample threshold.  This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL) {
			prof_sample_threshold_update(tsd);
		}
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
			prof_tctx_destroy(tsd, tctx);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
		}
	}
}

void
prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
	prof_tctx_set(tsd, ptr, NULL, tctx);

	/*
	 * Get the current time and set this in the extent_t.  We'll read this
	 * when free() is called.
	 */
	nstime_t t = NSTIME_ZERO_INITIALIZER;
	nstime_update(&t);
	prof_alloc_time_set(tsd, ptr, t);

	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
	assert(prof_info != NULL);
	prof_tctx_t *tctx = prof_info->alloc_tctx;
	assert((uintptr_t)tctx > (uintptr_t)1U);

	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	prof_try_log(tsd, usize, prof_info);

	if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
		prof_tctx_destroy(tsd, tctx);
	} else {
		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
	}
}

void
bt_init(prof_bt_t *bt, void **vec) {
	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

#ifdef JEMALLOC_PROF_LIBUNWIND
static void
prof_backtrace_impl(prof_bt_t *bt) {
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0) {
		return;
	}
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
	cassert(config_prof);

	return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL) {
		return _URC_END_OF_STACK;
	}
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max) {
		return _URC_END_OF_STACK;
	}

	return _URC_NO_REASON;
}

static void
prof_backtrace_impl(prof_bt_t *bt) {
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
static void
prof_backtrace_impl(prof_bt_t *bt) {
#define BT_FRAME(i) \
	if ((i) < PROF_BT_MAX) { \
		void *p; \
		if (__builtin_frame_address(i) == 0) { \
			return; \
		} \
		p = __builtin_return_address(i); \
		if (p == NULL) { \
			return; \
		} \
		bt->vec[(i)] = p; \
		bt->len = (i) + 1; \
	} else { \
		return; \
	}

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
static void
prof_backtrace_impl(prof_bt_t *bt) {
	cassert(config_prof);
	not_reached();
}
#endif
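
/*
 * Wrap the backend-specific unwinder in a reentrancy guard, so that any memory
 * allocation performed by the unwinder itself cannot re-enter the profiling
 * machinery.
 */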
void
prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
	cassert(config_prof);
	pre_reentrancy(tsd, NULL);
	prof_backtrace_impl(bt);
	post_reentrancy(tsd);
}
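
/* Hand out gctx locks round-robin from the shared table. */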
malloc_mutex_t *
prof_gctx_mutex_choose(void) {
	unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

	return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
	return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless heap
 * profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled.  Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround for
 * versions of glibc that don't properly save/restore floating point registers
 * during dynamic lazy symbol loading (which internally calls into whatever
 * malloc implementation happens to be integrated into the application).  Note
 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
 * memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
 */
void
prof_sample_threshold_update(tsd_t *tsd) {
#ifdef JEMALLOC_PROF
	if (!config_prof) {
		return;
	}

	if (lg_prof_sample == 0) {
		thread_prof_sample_event_update(tsd,
		    THREAD_EVENT_MIN_START_WAIT);
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                          __        __
	 *                         |  log(u)  |                    1
	 *   bytes_until_sample =  | -------- | ,  where p = ---------------
	 *                         | log(1-p) |               lg_prof_sample
	 *                                                    2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
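	/*
	 * For example, lg_prof_sample == 19 corresponds to p == 2^-19, i.e. an
	 * average of one sample per 2^19 bytes (512 KiB) allocated.
	 */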
	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
	double u = (double)r * (1.0/9007199254740992.0L);
	uint64_t bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
	thread_prof_sample_event_update(tsd, bytes_until_sample);
#endif
}

int
prof_getpid(void) {
#ifdef _WIN32
	return GetCurrentProcessId();
#else
	return getpid();
#endif
}

static const char *
prof_dump_prefix_get(tsdn_t *tsdn) {
	malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);

	return prof_dump_prefix == NULL ? opt_prof_prefix : prof_dump_prefix;
}

static bool
prof_dump_prefix_is_empty(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
	bool ret = (prof_dump_prefix_get(tsdn)[0] == '\0');
	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
	return ret;
}

#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
	cassert(config_prof);

	assert(tsd_reentrancy_level_get(tsd) == 0);
	const char *prof_prefix = prof_dump_prefix_get(tsd_tsdn(tsd));

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    prof_prefix, prof_getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}

void
prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
	malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
	    "%s.%d.%"FMTu64".json", prof_dump_prefix_get(tsdn), prof_getpid(),
	    ind);
	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
}
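
/*
 * Dump the final memory profile at exit; registered via atexit() in
 * prof_boot2() when opt_prof_final is enabled and a dump prefix is set.
 */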
static void
prof_fdump(void) {
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);

	if (!prof_booted) {
		return;
	}
	tsd = tsd_fetch();
	assert(tsd_reentrancy_level_get(tsd) == 0);
	assert(!prof_dump_prefix_is_empty(tsd_tsdn(tsd)));

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
	prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}

bool
prof_accum_init(tsdn_t *tsdn) {
	cassert(config_prof);

#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&prof_idump_accumulated.mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
		return true;
	}
	prof_idump_accumulated.accumbytes = 0;
#else
	atomic_store_u64(&prof_idump_accumulated.accumbytes, 0,
	    ATOMIC_RELAXED);
#endif
	return false;
}

bool
prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
	    ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(
	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
	    ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
	a0 = prof_idump_accumulated.accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_idump_accumulated.accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
#endif
	return overflow;
}

void
prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive accumbytes increase as possible
	 * without underflowing.  Interval-triggered dumps occur slightly more
	 * often than intended as a result of incomplete canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
	    ATOMIC_RELAXED);
	do {
		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(
	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
	    ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
	a0 = prof_idump_accumulated.accumbytes;
	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	prof_idump_accumulated.accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
#endif
}

bool
prof_dump_prefix_set(tsdn_t *tsdn, const char *prefix) {
	cassert(config_prof);
	ctl_mtx_assert_held(tsdn);
	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
	if (prof_dump_prefix == NULL) {
		malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
		/* Everything is still guarded by ctl_mtx. */
		char *buffer = base_alloc(tsdn, b0get(), PROF_DUMP_FILENAME_LEN,
		    QUANTUM);
		if (buffer == NULL) {
			return true;
		}
		malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
		prof_dump_prefix = buffer;
	}
	assert(prof_dump_prefix != NULL);

	prof_strncpy(prof_dump_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1);
	prof_dump_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0';
	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);

	return false;
}

void
prof_idump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
	if (prof_dump_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
		return;
	}
	char filename[PATH_MAX + 1];
	prof_dump_filename(tsd, filename, 'i', prof_dump_iseq);
	prof_dump_iseq++;
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
	prof_dump(tsd, false, filename, false);
}

bool
prof_mdump(tsd_t *tsd, const char *filename) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	if (!opt_prof || !prof_booted) {
		return true;
	}
	char filename_buf[DUMP_FILENAME_BUFSIZE];
	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
		if (prof_dump_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
			malloc_mutex_unlock(tsd_tsdn(tsd),
			    &prof_dump_filename_mtx);
			return true;
		}
		prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
		filename = filename_buf;
	}
	return prof_dump(tsd, true, filename, false);
}

void
prof_gdump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_gdump = true;
		return;
	}

	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
	if (prof_dump_prefix_get(tsdn)[0] == '\0') {
		malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
		return;
	}
	char filename[DUMP_FILENAME_BUFSIZE];
	prof_dump_filename(tsd, filename, 'u', prof_dump_useq);
	prof_dump_useq++;
	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
	prof_dump(tsd, false, filename, false);
}
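
/* Thread uids come from a global monotonically increasing counter. */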
static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
	uint64_t thr_uid;

	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
	thr_uid = next_thr_uid;
	next_thr_uid++;
	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);

	return thr_uid;
}

prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)), false);
}

static char *
prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
	char *ret;
	size_t size;

	if (thread_name == NULL) {
		return NULL;
	}

	size = strlen(thread_name) + 1;
	if (size == 1) {
		return "";
	}

	ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (ret == NULL) {
		return NULL;
	}
	memcpy(ret, thread_name, size);
	return ret;
}

prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
	uint64_t thr_uid = tdata->thr_uid;
	uint64_t thr_discrim = tdata->thr_discrim + 1;
	char *thread_name = (tdata->thread_name != NULL) ?
	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
	bool active = tdata->active;

	prof_tdata_detach(tsd, tdata);
	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
	    active, true);
}

void
prof_tdata_cleanup(tsd_t *tsd) {
	prof_tdata_t *tdata;

	if (!config_prof) {
		return;
	}

	tdata = tsd_prof_tdata_get(tsd);
	if (tdata != NULL) {
		prof_tdata_detach(tsd, tdata);
	}
}

bool
prof_active_get(tsdn_t *tsdn) {
	bool prof_active_current;

	prof_active_assert();
	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_current = prof_active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_current;
}

bool
prof_active_set(tsdn_t *tsdn, bool active) {
	bool prof_active_old;

	prof_active_assert();
	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_old = prof_active;
	prof_active = active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	prof_active_assert();
	return prof_active_old;
}

const char *
prof_thread_name_get(tsd_t *tsd) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return "";
	}
	return (tdata->thread_name != NULL ? tdata->thread_name : "");
}

int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return EAGAIN;
	}

	/* Validate input. */
	if (thread_name == NULL) {
		return EFAULT;
	}
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c)) {
			return EFAULT;
		}
	}

	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
	if (s == NULL) {
		return EAGAIN;
	}

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
		tdata->thread_name = NULL;
	}
	if (strlen(s) > 0) {
		tdata->thread_name = s;
	}
	return 0;
}

bool
prof_thread_active_get(tsd_t *tsd) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return false;
	}
	return tdata->active;
}

bool
prof_thread_active_set(tsd_t *tsd, bool active) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}
	tdata->active = active;
	return false;
}

bool
prof_thread_active_init_get(tsdn_t *tsdn) {
	bool active_init;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init;
}

bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
	bool active_init_old;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init_old;
}

bool
prof_gdump_get(tsdn_t *tsdn) {
	bool prof_gdump_current;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_current;
}

bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
	bool prof_gdump_old;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_old;
}

void
prof_boot0(void) {
	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void) {
	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */
	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (prof_data_init(tsd)) {
			return true;
		}

		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_filename_mtx, "prof_dump_filename",
		    WITNESS_RANK_PROF_DUMP_FILENAME, malloc_mutex_rank_exclusive)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		if (prof_log_init(tsd)) {
			return true;
		}

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}
#ifdef JEMALLOC_PROF_LIBGCC
		/*
		 * Cause the backtracing machinery to allocate its internal
		 * state before enabling profiling.
		 */
		_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
	}
	prof_booted = true;

	return false;
}
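
/*
 * Fork handling: all profiling mutexes are acquired in the prefork handlers
 * and released in the postfork handlers, in reverse order of acquisition.
 */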
void
prof_prefork0(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
		malloc_mutex_prefork(tsdn, &tdatas_mtx);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
		}
	}
}

void
prof_prefork1(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		malloc_mutex_prefork(tsdn, &prof_active_mtx);
		malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx);
		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
	}
}

void
prof_postfork_parent(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_parent(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
	}
}

void
prof_postfork_child(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
	}
}

/******************************************************************************/