Allow setting custom backtrace hook

Existing backtrace implementations skip native stack frames from runtimes like
Python. The hook allows augmenting the backtraces so that allocations can be
attributed to native functions in heap profiles.
This commit is contained in:
Alex Lapenkou
2021-08-30 14:05:56 -07:00
committed by Alexander Lapenkov
parent 523cfa55c5
commit f7d46b8119
11 changed files with 172 additions and 30 deletions

View File

@@ -305,6 +305,7 @@ CTL_PROTO(stats_retained)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_hooks_prof_backtrace)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
@@ -833,7 +834,8 @@ static const ctl_named_node_t stats_node[] = {
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
{NAME("remove"), CTL(experimental_hooks_remove)}
{NAME("remove"), CTL(experimental_hooks_remove)},
{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)}
};
static const ctl_named_node_t experimental_thread_node[] = {
@@ -3328,6 +3330,38 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
/*
 * Mallctl handler for experimental.hooks.prof_backtrace: reads and/or replaces
 * the process-wide profiling backtrace hook.
 *
 * Semantics:
 * - At least one of oldp (read) / newp (write) must be supplied, else EINVAL.
 * - Reading returns the currently installed prof_backtrace_hook_t.
 * - Writing requires profiling to be enabled (opt_prof), else ENOENT, and
 *   rejects a NULL hook with EINVAL — there must always be a valid hook
 *   installed (prof_backtrace() calls it unconditionally).
 * Returns 0 on success, an errno-style code on failure.
 */
static int
experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
/* Neither reading nor writing was requested. */
if (oldp == NULL && newp == NULL) {
ret = EINVAL;
goto label_return;
}
if (oldp != NULL) {
/* Report the hook that is currently installed. */
prof_backtrace_hook_t old_hook =
prof_backtrace_hook_get();
/* NOTE: READ may set ret and goto label_return on size mismatch. */
READ(old_hook, prof_backtrace_hook_t);
}
if (newp != NULL) {
/* Installing a hook only makes sense when profiling is active. */
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
/* NOTE: WRITE may set ret and goto label_return on size mismatch. */
WRITE(new_hook, prof_backtrace_hook_t);
/* A NULL hook would leave prof_backtrace() with nothing to call. */
if (new_hook == NULL) {
ret = EINVAL;
goto label_return;
}
prof_backtrace_hook_set(new_hook);
}
ret = 0;
label_return:
return ret;
}
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)

View File

@@ -10,6 +10,7 @@
#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/prof_stats.h"
#include "jemalloc/internal/prof_sys.h"
#include "jemalloc/internal/prof_hook.h"
#include "jemalloc/internal/thread_event.h"
/*
@@ -69,6 +70,9 @@ static malloc_mutex_t next_thr_uid_mtx;
/* Do not dump any profiles until bootstrapping is complete. */
bool prof_booted = false;
/* Logically a prof_backtrace_hook_t. */
atomic_p_t prof_backtrace_hook;
/******************************************************************************/
void
@@ -518,6 +522,17 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) {
return prof_gdump_old;
}
/*
 * Publish a new backtrace hook.  The release store pairs with the acquire
 * load in prof_backtrace_hook_get(), so readers observe a fully formed hook.
 */
void
prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
	atomic_p_t *slot = &prof_backtrace_hook;
	atomic_store_p(slot, hook, ATOMIC_RELEASE);
}
/*
 * Return the currently installed backtrace hook.  The acquire load pairs with
 * the release store in prof_backtrace_hook_set().
 *
 * Fix: declare the parameter list as (void).  In C, empty parentheses in a
 * definition declare a non-prototype function (deprecated; removed in C23).
 */
prof_backtrace_hook_t
prof_backtrace_hook_get(void) {
	return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
	    ATOMIC_ACQUIRE);
}
void
prof_boot0(void) {
cassert(config_prof);
@@ -657,6 +672,7 @@ prof_boot2(tsd_t *tsd, base_t *base) {
}
}
prof_hooks_init();
prof_unwind_init();
}
prof_booted = true;

View File

@@ -49,18 +49,18 @@ bt_init(prof_bt_t *bt, void **vec) {
#ifdef JEMALLOC_PROF_LIBUNWIND
static void
prof_backtrace_impl(prof_bt_t *bt) {
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
int nframes;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
assert(*len == 0);
assert(vec != NULL);
nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
nframes = unw_backtrace(vec, PROF_BT_MAX);
if (nframes <= 0) {
return;
}
bt->len = nframes;
*len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
@@ -81,9 +81,9 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
if (ip == NULL) {
return _URC_END_OF_STACK;
}
data->bt->vec[data->bt->len] = ip;
data->bt->len++;
if (data->bt->len == data->max) {
data->vec[*data->len] = ip;
(*data->len)++;
if (*data->len == data->max) {
return _URC_END_OF_STACK;
}
@@ -91,8 +91,8 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
}
static void
prof_backtrace_impl(prof_bt_t *bt) {
prof_unwind_data_t data = {bt, PROF_BT_MAX};
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
prof_unwind_data_t data = {vec, len, max_len};
cassert(config_prof);
@@ -100,9 +100,9 @@ prof_backtrace_impl(prof_bt_t *bt) {
}
#elif (defined(JEMALLOC_PROF_GCC))
static void
prof_backtrace_impl(prof_bt_t *bt) {
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
#define BT_FRAME(i) \
if ((i) < PROF_BT_MAX) { \
if ((i) < max_len) { \
void *p; \
if (__builtin_frame_address(i) == 0) { \
return; \
@@ -111,8 +111,8 @@ prof_backtrace_impl(prof_bt_t *bt) {
if (p == NULL) { \
return; \
} \
bt->vec[(i)] = p; \
bt->len = (i) + 1; \
vec[(i)] = p; \
*len = (i) + 1; \
} else { \
return; \
}
@@ -263,24 +263,28 @@ prof_backtrace_impl(prof_bt_t *bt) {
}
#else
static void
prof_backtrace_impl(prof_bt_t *bt) {
prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
cassert(config_prof);
not_reached();
}
#endif
void (* JET_MUTABLE prof_backtrace_hook)(prof_bt_t *bt) = &prof_backtrace_impl;
/*
 * Collect a backtrace into bt by invoking the currently installed hook.
 * Reentrancy is suppressed around the hook call so that any allocation the
 * unwinder performs is not itself profiled.
 *
 * Fix: the local was named prof_backtrace_hook, shadowing the file-scope
 * atomic_p_t global of the same name (-Wshadow); renamed to hook.
 */
void
prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
	cassert(config_prof);
	pre_reentrancy(tsd, NULL);
	prof_backtrace_hook_t hook = prof_backtrace_hook_get();
	hook(bt->vec, &bt->len, PROF_BT_MAX);
	post_reentrancy(tsd);
}
void prof_unwind_init() {
/*
 * Install the built-in native unwinder as the initial backtrace hook.
 *
 * Fix: declare the parameter list as (void).  In C, empty parentheses in a
 * definition declare a non-prototype function (deprecated; removed in C23).
 */
void
prof_hooks_init(void) {
	prof_backtrace_hook_set(&prof_backtrace_impl);
}
void
prof_unwind_init() {
#ifdef JEMALLOC_PROF_LIBGCC
/*
* Cause the backtracing machinery to allocate its internal