Add experimental.thread.activity_callback.

This (experimental, undocumented) functionality lets users track statistics of
interest at a finer granularity than whole-thread totals: a registered callback
is invoked periodically, driven by allocator activity, and receives the thread's
cumulative allocated and deallocated byte counts.
David Goldblatt 2020-10-30 16:31:32 -07:00 committed by David Goldblatt
parent 27ef02ca9a
commit 1b3ee75667
5 changed files with 151 additions and 3 deletions
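
Usage sketch (not part of the commit), modeled on the unit test added below: a
consumer installs a callback through the new mallctl and reads the previous one
back. The thunk_t struct here merely mirrors the two-field layout of the internal
activity_callback_thunk_t, since that type is not exported in the public API; the
callback body, the counts array, and the 10 MiB allocation used to trigger an
event are illustrative assumptions. On builds without statistics support the
control returns ENOENT.

/* Illustrative only; assumes a stats-enabled jemalloc build. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Mirrors the two-field layout of the internal activity_callback_thunk_t. */
typedef struct {
	void (*callback)(void *uctx, uint64_t allocated, uint64_t deallocated);
	void *uctx;
} thunk_t;

/* Records the thread's cumulative allocated/deallocated byte counts. */
static void
record_activity(void *uctx, uint64_t allocated, uint64_t deallocated) {
	uint64_t *counts = (uint64_t *)uctx;
	counts[0] = allocated;
	counts[1] = deallocated;
}

int
main(void) {
	uint64_t counts[2] = {0, 0};
	thunk_t new_thunk = {record_activity, counts};
	thunk_t old_thunk;
	size_t sz = sizeof(old_thunk);

	/* Install the callback; the previous thunk (initially {NULL, NULL})
	 * is read back through oldp. */
	if (mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
	    &new_thunk, sizeof(new_thunk)) != 0) {
		return 1;
	}

	/* Enough activity for the peak_event module to invoke the callback. */
	void *p = mallocx(10 * 1024 * 1024, 0);
	dallocx(p, 0);
	printf("allocated=%" PRIu64 " deallocated=%" PRIu64 "\n", counts[0],
	    counts[1]);

	/* Writing a {NULL, NULL} thunk turns tracking back off. */
	thunk_t off = {NULL, NULL};
	mallctl("experimental.thread.activity_callback", NULL, NULL, &off,
	    sizeof(off));
	return 0;
}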

include/jemalloc/internal/activity_callback.h (new file)

@@ -0,0 +1,23 @@
+#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
+#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
+
+/*
+ * The callback to be executed "periodically", in response to some amount of
+ * allocator activity.
+ *
+ * This callback need not be computing any sort of peak (although that's the
+ * intended first use case), but we drive it from the peak counter, so it
+ * keeps things tidy to keep it here.
+ *
+ * The calls to this thunk get driven by the peak_event module.
+ */
+#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
+typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
+    uint64_t deallocated);
+typedef struct activity_callback_thunk_s activity_callback_thunk_t;
+struct activity_callback_thunk_s {
+	activity_callback_t callback;
+	void *uctx;
+};
+
+#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
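
As the comment above notes, peak measurement is the intended first use case; a
hypothetical callback along those lines (illustrative only, no such code in this
commit, names invented here) might simply track the largest net footprint it has
observed:

#include <stdint.h>

/* Hypothetical uctx: the largest net (allocated - deallocated) seen so far. */
typedef struct {
	int64_t max_net;
} net_peak_t;

/* Signature matches activity_callback_t; the peak_event module supplies the
 * thread's cumulative allocated and deallocated byte counts. */
static void
net_peak_update(void *uctx, uint64_t allocated, uint64_t deallocated) {
	net_peak_t *p = (net_peak_t *)uctx;
	int64_t net = (int64_t)(allocated - deallocated);
	if (net > p->max_net) {
		p->max_net = net;
	}
}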

include/jemalloc/internal/tsd.h

@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_TSD_H #ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H #define JEMALLOC_INTERNAL_TSD_H
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h" #include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h" #include "jemalloc/internal/bin_types.h"
@ -82,6 +83,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(tsd_link, tsd_link_t, tsd_link_t) \ O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \ O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \ O(peak, peak_t, peak_t) \
O(activity_callback_thunk, activity_callback_thunk_t, \
activity_callback_thunk_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \ O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
@ -112,6 +115,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* tsd_link */ {NULL}, \ /* tsd_link */ {NULL}, \
/* in_hook */ false, \ /* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, \ /* peak */ PEAK_INITIALIZER, \
/* activity_callback_thunk */ \
ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \ /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_ZERO_INITIALIZER, /* rtree_ctx */ RTREE_CTX_ZERO_INITIALIZER,

src/ctl.c

@@ -264,6 +264,7 @@ CTL_PROTO(stats_retained)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
+CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
@@ -712,6 +713,11 @@ static const ctl_named_node_t experimental_hooks_node[] = {
	{NAME("remove"), CTL(experimental_hooks_remove)}
};

+static const ctl_named_node_t experimental_thread_node[] = {
+	{NAME("activity_callback"),
+	    CTL(experimental_thread_activity_callback)}
+};
+
static const ctl_named_node_t experimental_utilization_node[] = {
	{NAME("query"), CTL(experimental_utilization_query)},
	{NAME("batch_query"), CTL(experimental_utilization_batch_query)}
@@ -738,7 +744,8 @@ static const ctl_named_node_t experimental_node[] = {
	{NAME("utilization"), CHILD(named, experimental_utilization)},
	{NAME("arenas"), CHILD(indexed, experimental_arenas)},
	{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
-	{NAME("batch_alloc"), CTL(experimental_batch_alloc)}
+	{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
+	{NAME("thread"), CHILD(named, experimental_thread)}
};

static const ctl_named_node_t root_node[] = {
@@ -3428,6 +3435,32 @@ label_return:
	return ret;
}

+static int
+experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
+    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+	int ret;
+
+	if (!config_stats) {
+		return ENOENT;
+	}
+
+	activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
+	READ(t_old, activity_callback_thunk_t);
+
+	if (newp != NULL) {
+		/*
+		 * This initialization is unnecessary. If it's omitted, though,
+		 * clang gets confused and warns on the subsequent use of t_new.
+		 */
+		activity_callback_thunk_t t_new = {NULL, NULL};
+		WRITE(t_new, activity_callback_thunk_t);
+		tsd_activity_callback_thunk_set(tsd, t_new);
+	}
+	ret = 0;
+label_return:
+	return ret;
+}
+
/*
 * Output six memory utilization entries for an input pointer, the first one of
 * type (void *) and the remaining five of type size_t, describing the following

src/peak_event.c

@@ -1,9 +1,11 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

-#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/peak_event.h"
+
+#include "jemalloc/internal/activity_callback.h"
+#include "jemalloc/internal/peak.h"

/*
 * Update every 64K by default. We're not exposing this as a configuration
 * option for now; we don't want to bind ourselves too tightly to any particular
@@ -21,6 +23,17 @@ peak_event_update(tsd_t *tsd) {
	peak_update(peak, alloc, dalloc);
}

+static void
+peak_event_activity_callback(tsd_t *tsd) {
+	activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
+	    tsd);
+	uint64_t alloc = tsd_thread_allocated_get(tsd);
+	uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+	if (thunk->callback != NULL) {
+		thunk->callback(thunk->uctx, alloc, dalloc);
+	}
+}
+
/* Set current state to zero. */
void
peak_event_zero(tsd_t *tsd) {
@@ -49,6 +62,7 @@ peak_alloc_postponed_event_wait(tsd_t *tsd) {
void
peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
	peak_event_update(tsd);
+	peak_event_activity_callback(tsd);
}

uint64_t
@@ -64,4 +78,5 @@ peak_dalloc_postponed_event_wait(tsd_t *tsd) {
void
peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
	peak_event_update(tsd);
+	peak_event_activity_callback(tsd);
}

test/unit/mallctl.c

@@ -1030,6 +1030,77 @@ TEST_BEGIN(test_thread_peak) {
}
TEST_END

+typedef struct activity_test_data_s activity_test_data_t;
+struct activity_test_data_s {
+	uint64_t obtained_alloc;
+	uint64_t obtained_dalloc;
+};
+
+static void
+activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
+	activity_test_data_t *test_data = (activity_test_data_t *)uctx;
+	test_data->obtained_alloc = alloc;
+	test_data->obtained_dalloc = dalloc;
+}
+
+TEST_BEGIN(test_thread_activity_callback) {
+	test_skip_if(!config_stats);
+
+	const size_t big_size = 10 * 1024 * 1024;
+	void *ptr;
+	int err;
+	size_t sz;
+
+	uint64_t *allocatedp;
+	uint64_t *deallocatedp;
+	sz = sizeof(allocatedp);
+	err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
+	assert_d_eq(0, err, "");
+	err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
+	assert_d_eq(0, err, "");
+
+	activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
+	    (void *)222};
+
+	activity_test_data_t test_data = {333, 444};
+	activity_callback_thunk_t new_thunk =
+	    {&activity_test_callback, &test_data};
+
+	sz = sizeof(old_thunk);
+	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+	    &new_thunk, sizeof(new_thunk));
+	assert_d_eq(0, err, "");
+	expect_true(old_thunk.callback == NULL, "Callback already installed");
+	expect_true(old_thunk.uctx == NULL, "Callback data already installed");
+
+	ptr = mallocx(big_size, 0);
+	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+	free(ptr);
+	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+	sz = sizeof(old_thunk);
+	new_thunk = (activity_callback_thunk_t){NULL, NULL};
+	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+	    &new_thunk, sizeof(new_thunk));
+	assert_d_eq(0, err, "");
+	expect_true(old_thunk.callback == &activity_test_callback, "");
+	expect_true(old_thunk.uctx == &test_data, "");
+
+	/* Inserting NULL should have turned off tracking. */
+	test_data.obtained_alloc = 333;
+	test_data.obtained_dalloc = 444;
+	ptr = mallocx(big_size, 0);
+	free(ptr);
+	expect_u64_eq(333, test_data.obtained_alloc, "");
+	expect_u64_eq(444, test_data.obtained_dalloc, "");
+}
+TEST_END
+
int
main(void) {
	return test(
@@ -1063,5 +1134,6 @@ main(void) {
	    test_hooks,
	    test_hooks_exhaustion,
	    test_thread_idle,
-	    test_thread_peak);
+	    test_thread_peak,
+	    test_thread_activity_callback);
}