#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/emitter.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_recent.h"

ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
static atomic_zd_t prof_recent_alloc_max;
static ssize_t prof_recent_alloc_count = 0;
prof_recent_list_t prof_recent_alloc_list;

malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */

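/*
 * The effective cap on the number of recorded allocations is kept in an
 * atomic so that fast paths can read it without taking
 * prof_recent_alloc_mtx; updates (after init) happen with the mutex held.
 */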
static void
prof_recent_alloc_max_init() {
	atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
	    ATOMIC_RELAXED);
}

static inline ssize_t
prof_recent_alloc_max_get_no_lock() {
	return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
}

static inline ssize_t
prof_recent_alloc_max_get(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	return prof_recent_alloc_max_get_no_lock();
}

static inline ssize_t
prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	ssize_t old_max = prof_recent_alloc_max_get(tsd);
	atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
	return old_max;
}

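/*
 * Record nodes are internal metadata, allocated from arena 0 here and
 * sanity-checked / freed in prof_recent_free_node() below.
 */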
static prof_recent_t *
prof_recent_allocate_node(tsdn_t *tsdn) {
	return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
	    sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
	    arena_get(tsdn, 0, false), true);
}

static void
prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
	assert(node != NULL);
	assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
	idalloctm(tsdn, node, NULL, NULL, true, true);
}

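/*
 * Each record pins its alloc/dalloc tctx via tctx->recent_count so that the
 * tctx outlives the record; decrement_recent_count() drops the pin and lets
 * prof_tctx_try_destroy() reclaim the tctx once nothing else needs it.
 */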
static inline void
increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	++tctx->recent_count;
	assert(tctx->recent_count > 0);
}

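/*
 * Called at sampling time with tctx->tdata->lock held; returns whether the
 * allocation should later be recorded via prof_recent_alloc().
 */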
bool
prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(opt_prof && prof_booted);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	/*
	 * Check whether last-N mode is turned on without trying to acquire the
	 * lock, so as to optimize for the following two scenarios:
	 * (1) Last-N mode is switched off;
	 * (2) Dumping, during which last-N mode is temporarily turned off so
	 *     as not to block sampled allocations.
	 */
	if (prof_recent_alloc_max_get_no_lock() == 0) {
		return false;
	}

	/*
	 * Increment recent_count to hold the tctx so that it won't be gone
	 * even after tctx->tdata->lock is released.  This acts as a
	 * "placeholder"; the real recording of the allocation requires a lock
	 * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
	 * tctx->tdata->lock has been released).
	 */
	increment_recent_count(tsd, tctx);
	return true;
}

static void
decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(tctx != NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
	assert(tctx->recent_count > 0);
	--tctx->recent_count;
	prof_tctx_try_destroy(tsd, tctx);
}

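/*
 * A live record and its extent point at each other.  The record's alloc_edata
 * field is atomic so that dumping can tell whether the allocation has been
 * released without taking the lock; the edata-side pointer is accessed only
 * through the wrappers below, which assert lock ownership where required.
 */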
static inline edata_t *
prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
	return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
}

edata_t *
prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
	cassert(config_prof);
	return prof_recent_alloc_edata_get_no_lock(n);
}

static inline edata_t *
prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	return prof_recent_alloc_edata_get_no_lock(n);
}

static void
prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
}

void
edata_prof_recent_alloc_init(edata_t *edata) {
	cassert(config_prof);
	edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
	cassert(config_prof);
	return edata_prof_recent_alloc_get_dont_call_directly(edata);
}

prof_recent_t *
edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
	cassert(config_prof);
	return edata_prof_recent_alloc_get_no_lock(edata);
}

static inline prof_recent_t *
edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_t *recent_alloc =
	    edata_prof_recent_alloc_get_no_lock(edata);
	assert(recent_alloc == NULL ||
	    prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
	return recent_alloc;
}

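/*
 * Swap the edata-side pointer and return the previous value; the set/reset
 * wrappers assert the expected old value to catch inconsistent linking.
 */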
static prof_recent_t *
edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_get(tsd, edata);
	edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
	return old_recent_alloc;
}

static void
edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(recent_alloc != NULL);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
	assert(old_recent_alloc == NULL);
	prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
}

static void
edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
    prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	assert(recent_alloc != NULL);
	prof_recent_t *old_recent_alloc =
	    edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
	assert(old_recent_alloc == recent_alloc);
	assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
	prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
}

/*
 * This function should be called right before an allocation is released, so
 * that the associated recent allocation record can contain the following
 * information:
 * (1) The allocation is released;
 * (2) The time of the deallocation; and
 * (3) The prof_tctx associated with the deallocation.
 */
void
prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
	cassert(config_prof);
	/*
	 * Check whether the recent allocation record still exists without
	 * trying to acquire the lock.
	 */
	if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
		return;
	}

	prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
	/*
	 * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
	 * deallocation time / tctx, which is handled later, after we check
	 * again when holding the lock.
	 */

	if (dalloc_tctx != NULL) {
		malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
		increment_recent_count(tsd, dalloc_tctx);
		dalloc_tctx->prepared = false;
		malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	/* Check again after acquiring the lock. */
	prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
	if (recent != NULL) {
		assert(nstime_equals_zero(&recent->dalloc_time));
		assert(recent->dalloc_tctx == NULL);
		if (dalloc_tctx != NULL) {
			nstime_prof_update(&recent->dalloc_time);
			recent->dalloc_tctx = dalloc_tctx;
			dalloc_tctx = NULL;
		}
		edata_prof_recent_alloc_reset(tsd, edata, recent);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	if (dalloc_tctx != NULL) {
		/* We lost the race - the allocation record was just gone. */
		decrement_recent_count(tsd, dalloc_tctx);
	}
}

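/* Detach a record from its extent, if they are still linked. */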
static void
prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
	if (edata != NULL) {
		edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
	}
}

static bool
prof_recent_alloc_is_empty(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (ql_empty(&prof_recent_alloc_list)) {
		assert(prof_recent_alloc_count == 0);
		return true;
	} else {
		assert(prof_recent_alloc_count > 0);
		return false;
	}
}

static void
prof_recent_alloc_assert_count(tsd_t *tsd) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (!config_debug) {
		return;
	}
	ssize_t count = 0;
	prof_recent_t *n;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++count;
	}
	assert(count == prof_recent_alloc_count);
	assert(prof_recent_alloc_max_get(tsd) == -1 ||
	    count <= prof_recent_alloc_max_get(tsd));
}

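/*
 * Record a sampled allocation into the last-N list.  Called after
 * prof_recent_alloc_prepare() has returned true and tctx->tdata->lock has
 * been dropped.
 */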
void
prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
	cassert(config_prof);
	assert(edata != NULL);
	prof_tctx_t *tctx = edata_prof_tctx_get(edata);

	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);

	/*
	 * Reserve a new prof_recent_t node if needed.  If needed, we release
	 * the prof_recent_alloc_mtx lock and allocate.  Then, rather than
	 * immediately checking for OOM, we regain the lock and try to make use
	 * of the reserve node if needed.  There are six scenarios:
	 *
	 *          \ now | no need | need but OOMed | need and allocated
	 *  later   \     |         |                |
	 * ------------------------------------------------------------
	 *  no need       |   (1)   |      (2)       |        (3)
	 * ------------------------------------------------------------
	 *  need          |   (4)   |      (5)       |        (6)
	 *
	 * First, "(4)" never happens, because we don't release the lock in the
	 * middle if there's no need for a new node; in such cases "(1)" always
	 * takes place, which is trivial.
	 *
	 * Out of the remaining four scenarios, "(6)" is the common case and is
	 * trivial.  "(5)" is also trivial, in which case we'll roll back the
	 * effect of prof_recent_alloc_prepare() as expected.
	 *
	 * "(2)" / "(3)" occurs when the need for a new node is gone after we
	 * regain the lock.  If the new node is successfully allocated, i.e. in
	 * the case of "(3)", we'll release it in the end; otherwise, i.e. in
	 * the case of "(2)", we do nothing - we're lucky that the OOM ends up
	 * doing no harm at all.
	 *
	 * Therefore, the only performance cost of the "release lock" ->
	 * "allocate" -> "regain lock" design is the "(3)" case, but it happens
	 * very rarely, so the cost is relatively small compared to the gain of
	 * not having to have the lock order of prof_recent_alloc_mtx above all
	 * the allocation locks.
	 */
	prof_recent_t *reserve = NULL;
	if (prof_recent_alloc_max_get(tsd) == -1 ||
	    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
		assert(prof_recent_alloc_max_get(tsd) != 0);
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
		reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
		prof_recent_alloc_assert_count(tsd);
	}

	if (prof_recent_alloc_max_get(tsd) == 0) {
		assert(prof_recent_alloc_is_empty(tsd));
		goto label_rollback;
	}

	prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
	if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
		/* If upper limit is reached, rotate the head. */
		assert(prof_recent_alloc_max_get(tsd) != -1);
		assert(!prof_recent_alloc_is_empty(tsd));
		prof_recent_t *head = ql_first(&prof_recent_alloc_list);
		old_alloc_tctx = head->alloc_tctx;
		assert(old_alloc_tctx != NULL);
		old_dalloc_tctx = head->dalloc_tctx;
		prof_recent_alloc_evict_edata(tsd, head);
		ql_rotate(&prof_recent_alloc_list, link);
	} else {
		/* Otherwise make use of the new node. */
		assert(prof_recent_alloc_max_get(tsd) == -1 ||
		    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
		if (reserve == NULL) {
			goto label_rollback;
		}
		ql_elm_new(reserve, link);
		ql_tail_insert(&prof_recent_alloc_list, reserve, link);
		reserve = NULL;
		old_alloc_tctx = NULL;
		old_dalloc_tctx = NULL;
		++prof_recent_alloc_count;
	}

	/* Fill content into the tail node. */
	prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
	assert(tail != NULL);
	tail->size = size;
	tail->usize = usize;
	nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
	tail->alloc_tctx = tctx;
	nstime_init_zero(&tail->dalloc_time);
	tail->dalloc_tctx = NULL;
	edata_prof_recent_alloc_set(tsd, edata, tail);

	assert(!prof_recent_alloc_is_empty(tsd));
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	if (reserve != NULL) {
		prof_recent_free_node(tsd_tsdn(tsd), reserve);
	}

	/*
	 * Asynchronously handle the tctx of the old node, so that there's no
	 * simultaneous holdings of prof_recent_alloc_mtx and tdata->lock.
	 * In the worst case this may delay the tctx release but it's better
	 * than holding prof_recent_alloc_mtx for longer.
	 */
	if (old_alloc_tctx != NULL) {
		decrement_recent_count(tsd, old_alloc_tctx);
	}
	if (old_dalloc_tctx != NULL) {
		decrement_recent_count(tsd, old_dalloc_tctx);
	}
	return;

label_rollback:
	assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	if (reserve != NULL) {
		prof_recent_free_node(tsd_tsdn(tsd), reserve);
	}
	decrement_recent_count(tsd, tctx);
}

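/*
 * Reader for the max setting on behalf of the corresponding mallctl; a racy
 * read is fine here.
 */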
ssize_t
prof_recent_alloc_max_ctl_read() {
	cassert(config_prof);
	/* Don't bother to acquire the lock. */
	return prof_recent_alloc_max_get_no_lock();
}

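/*
 * Trim the list down to the current max, evicting from the oldest end; the
 * trimmed-off nodes are handed back on to_delete so that their tctxs can be
 * released and the nodes freed outside the lock.
 */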
static void
prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	ssize_t max = prof_recent_alloc_max_get(tsd);
	if (max == -1 || prof_recent_alloc_count <= max) {
		/* Easy case - no need to alter the list. */
		ql_new(to_delete);
		prof_recent_alloc_assert_count(tsd);
		return;
	}

	prof_recent_t *node;
	ql_foreach(node, &prof_recent_alloc_list, link) {
		if (prof_recent_alloc_count == max) {
			break;
		}
		prof_recent_alloc_evict_edata(tsd, node);
		--prof_recent_alloc_count;
	}
	assert(prof_recent_alloc_count == max);

	ql_move(to_delete, &prof_recent_alloc_list);
	if (max == 0) {
		assert(node == NULL);
	} else {
		assert(node != NULL);
		ql_split(to_delete, node, &prof_recent_alloc_list, link);
	}
	assert(!ql_empty(to_delete));
	prof_recent_alloc_assert_count(tsd);
}

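/*
 * Free the trimmed-off nodes and release their tctxs; must run without
 * either lock held, since decrement_recent_count() takes tdata->lock.
 */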
static void
prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	while (!ql_empty(to_delete)) {
		prof_recent_t *node = ql_first(to_delete);
		ql_remove(to_delete, node, link);
		decrement_recent_count(tsd, node->alloc_tctx);
		if (node->dalloc_tctx != NULL) {
			decrement_recent_count(tsd, node->dalloc_tctx);
		}
		prof_recent_free_node(tsd_tsdn(tsd), node);
	}
}

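/*
 * Setter for the max: update the cap under the lock, trim the list to the
 * new cap, then do the tctx releases and node frees outside the lock.
 */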
ssize_t
prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
	cassert(config_prof);
	assert(max >= -1);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
	prof_recent_list_t to_delete;
	prof_recent_alloc_restore_locked(tsd, &to_delete);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_async_cleanup(tsd, &to_delete);
	return old_max;
}

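/*
 * Emit one backtrace as a JSON array of pointer strings; bt_buf is sized for
 * a pointer rendered in hex ("%p") plus prefix and NUL.
 */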
static void
prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
	char bt_buf[2 * sizeof(intptr_t) + 3];
	char *s = bt_buf;
	assert(tctx != NULL);
	prof_bt_t *bt = &tctx->gctx->bt;
	for (size_t i = 0; i < bt->len; ++i) {
		malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
		emitter_json_value(emitter, emitter_type_string, &s);
	}
}

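/*
 * Illustrative sketch (not verbatim emitter output) of one record in the
 * "recent_alloc" array; the keys match the emitter_json_kv() calls below,
 * and the values here are made up:
 *   {"size": 4096, "usize": 4096, "released": true,
 *    "alloc_thread_uid": 3, "alloc_time": 1617314581249317004,
 *    "alloc_trace": ["0x1234", "0x5678"],
 *    "dalloc_thread_uid": 3, "dalloc_time": 1617314581249381984,
 *    "dalloc_trace": ["0x9abc", "0xdef0"]}
 */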
static void
prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
	emitter_json_object_begin(emitter);

	emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
	emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
	bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
	emitter_json_kv(emitter, "released", emitter_type_bool, &released);

	emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
	    &node->alloc_tctx->thr_uid);
	prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
	assert(alloc_tdata != NULL);
	if (alloc_tdata->thread_name != NULL) {
		emitter_json_kv(emitter, "alloc_thread_name",
		    emitter_type_string, &alloc_tdata->thread_name);
	}
	uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
	emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
	    &alloc_time_ns);
	emitter_json_array_kv_begin(emitter, "alloc_trace");
	prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
	emitter_json_array_end(emitter);

	if (released && node->dalloc_tctx != NULL) {
		emitter_json_kv(emitter, "dalloc_thread_uid",
		    emitter_type_uint64, &node->dalloc_tctx->thr_uid);
		prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
		assert(dalloc_tdata != NULL);
		if (dalloc_tdata->thread_name != NULL) {
			emitter_json_kv(emitter, "dalloc_thread_name",
			    emitter_type_string, &dalloc_tdata->thread_name);
		}
		assert(!nstime_equals_zero(&node->dalloc_time));
		uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
		emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
		    &dalloc_time_ns);
		emitter_json_array_kv_begin(emitter, "dalloc_trace");
		prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
		emitter_json_array_end(emitter);
	}

	emitter_json_object_end(emitter);
}

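/*
 * Dump the whole list as compact JSON.  The list is swapped out onto
 * temp_list (with the count reset) so that the dump itself runs without
 * holding prof_recent_alloc_mtx and does not block sampled allocations;
 * afterwards the two lists are spliced back together and trimmed back down
 * to the max, with any surplus nodes cleaned up outside the locks.
 */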
#define PROF_RECENT_PRINT_BUFSIZE 65536
JEMALLOC_COLD
void
prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
	cassert(config_prof);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
	buf_writer_t buf_writer;
	buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
	    PROF_RECENT_PRINT_BUFSIZE);
	emitter_t emitter;
	emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
	    &buf_writer);
	prof_recent_list_t temp_list;

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	ssize_t dump_max = prof_recent_alloc_max_get(tsd);
	ql_move(&temp_list, &prof_recent_alloc_list);
	ssize_t dump_count = prof_recent_alloc_count;
	prof_recent_alloc_count = 0;
	prof_recent_alloc_assert_count(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	emitter_begin(&emitter);
	uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
	emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
	    &sample_interval);
	emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
	    &dump_max);
	emitter_json_array_kv_begin(&emitter, "recent_alloc");
	prof_recent_t *node;
	ql_foreach(node, &temp_list, link) {
		prof_recent_alloc_dump_node(&emitter, node);
	}
	emitter_json_array_end(&emitter);
	emitter_end(&emitter);

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
	prof_recent_alloc_assert_count(tsd);
	ql_concat(&temp_list, &prof_recent_alloc_list, link);
	ql_move(&prof_recent_alloc_list, &temp_list);
	prof_recent_alloc_count += dump_count;
	prof_recent_alloc_restore_locked(tsd, &temp_list);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

	buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);

	prof_recent_alloc_async_cleanup(tsd, &temp_list);
}
#undef PROF_RECENT_PRINT_BUFSIZE

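/*
 * Bootstraps the module state; returns true on error, following the usual
 * jemalloc init convention.
 */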
bool
prof_recent_init() {
	cassert(config_prof);
	prof_recent_alloc_max_init();

	if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
	    WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
		return true;
	}

	if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
	    WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
		return true;
	}

	ql_new(&prof_recent_alloc_list);

	return false;
}