Push event handlers to constituent modules

parent 381c97caa4
commit b06dfb9ccc

include/jemalloc/internal/prof_externs.h

@@ -102,7 +102,7 @@ void prof_postfork_child(tsdn_t *tsdn);
 /* Only accessed by thread event. */
 uint64_t prof_sample_new_event_wait(tsd_t *tsd);
 uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
-bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
+void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);
 
 bool prof_log_start(tsdn_t *tsdn, const char *filename);
 bool prof_log_stop(tsdn_t *tsdn);

include/jemalloc/internal/stats.h

@@ -40,7 +40,7 @@ extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
 /* Only accessed by thread event. */
 uint64_t stats_interval_new_event_wait(tsd_t *tsd);
 uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
-bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
+void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
 
 /* Implements je_malloc_stats_print. */
 void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);

include/jemalloc/internal/tcache_externs.h

@@ -56,9 +56,9 @@ void tcache_assert_initialized(tcache_t *tcache);
 /* Only accessed by thread event. */
 uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
 uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
+void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
 uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
 uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
-void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
-    tcache_t *tcache);
+void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
 
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */

include/jemalloc/internal/thread_event.h

@@ -23,6 +23,12 @@
  */
 #define TE_MAX_INTERVAL ((uint64_t)(4U << 20))
 
+/*
+ * Invalid elapsed time, for situations where elapsed time is not needed. See
+ * comments in thread_event.c for more info.
+ */
+#define TE_INVALID_ELAPSED UINT64_MAX
+
 typedef struct te_ctx_s {
     bool is_alloc;
     uint64_t *current;
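
Note: the new TE_INVALID_ELAPSED sentinel lets all events share one handler
signature even when elapsed time is meaningless for a given event (the tcache
GC handlers below assert they receive it; the prof/stats handlers assert they
do not). A minimal standalone sketch of the convention (names other than
TE_INVALID_ELAPSED are illustrative, not jemalloc's):

    #include <assert.h>
    #include <stdint.h>

    #define TE_INVALID_ELAPSED UINT64_MAX

    /* A byte-driven event must receive a real elapsed value... */
    static void
    byte_driven_handler(uint64_t elapsed) {
        assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
        /* ... accumulate `elapsed` toward a threshold ... */
    }

    /* ... while a purely periodic event expects the sentinel. */
    static void
    periodic_handler(uint64_t elapsed) {
        assert(elapsed == TE_INVALID_ELAPSED);
        /* ... do fixed periodic work ... */
    }

    int
    main(void) {
        byte_driven_handler(4096);
        periodic_handler(TE_INVALID_ELAPSED);
        return 0;
    }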

src/prof.c (25 lines changed)

@@ -50,7 +50,7 @@ bool opt_prof_accum = false;
 char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
 bool opt_prof_experimental_use_sys_thread_name = false;
 
-/* Accessed via prof_idump_accum(). */
+/* Accessed via prof_sample_event_handler(). */
 static counter_accum_t prof_idump_accumulated;
 
 /*

@@ -574,6 +574,18 @@ prof_sample_postponed_event_wait(tsd_t *tsd) {
     return prof_sample_new_event_wait(tsd);
 }
 
+void
+prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) {
+    cassert(config_prof);
+    assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+    if (prof_interval == 0 || !prof_active_get_unlocked()) {
+        return;
+    }
+    if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) {
+        prof_idump(tsd_tsdn(tsd));
+    }
+}
+
 int
 prof_getpid(void) {
 #ifdef _WIN32
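
Note: counter_accum here adds the elapsed allocation bytes into
prof_idump_accumulated and returns true once the running total crosses the
configured interval, which is what fires prof_idump. A simplified,
single-threaded model of that accumulate-and-trip behavior (jemalloc's real
counter_accum_t is atomic and shared; this toy only shows the threshold
logic):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t accumbytes; /* running total since the last trigger */
        uint64_t interval;   /* trigger threshold, in bytes */
    } toy_counter_t;

    /* Add bytes; report true (keeping the remainder) on threshold cross. */
    static bool
    toy_counter_accum(toy_counter_t *c, uint64_t bytes) {
        c->accumbytes += bytes;
        if (c->accumbytes >= c->interval) {
            c->accumbytes -= c->interval;
            return true;
        }
        return false;
    }

    int
    main(void) {
        toy_counter_t c = {0, 1 << 20}; /* "dump" every 1 MiB */
        for (int i = 0; i < 5; i++) {
            if (toy_counter_accum(&c, 300 << 10)) { /* 300 KiB steps */
                printf("dump triggered at step %d\n", i);
            }
        }
        return 0;
    }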

@@ -658,17 +670,6 @@ prof_idump_accum_init(void) {
     return counter_accum_init(&prof_idump_accumulated, prof_interval);
 }
 
-bool
-prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes) {
-    cassert(config_prof);
-
-    if (prof_interval == 0 || !prof_active_get_unlocked()) {
-        return false;
-    }
-
-    return counter_accum(tsdn, &prof_idump_accumulated, accumbytes);
-}
-
 bool
 prof_dump_prefix_set(tsdn_t *tsdn, const char *prefix) {
     cassert(config_prof);

src/stats.c (14 lines changed)

@@ -1493,11 +1493,6 @@ stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
     emitter_end(&emitter);
 }
 
-bool
-stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
-    return counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated, bytes);
-}
-
 uint64_t
 stats_interval_new_event_wait(tsd_t *tsd) {
     return stats_interval_accum_batch;

@@ -1508,6 +1503,15 @@ stats_interval_postponed_event_wait(tsd_t *tsd) {
     return TE_MIN_START_WAIT;
 }
 
+void
+stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
+    assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+    if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
+        elapsed)) {
+        je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
+    }
+}
+
 bool
 stats_boot(void) {
     uint64_t stats_interval;
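
Note: the elapsed value these handlers receive is computed centrally in
src/thread_event.c (see stats_interval_fetch_elapsed below): it is the number
of bytes this thread has allocated since the last stats event. A
self-contained sketch of that last-event bookkeeping (the toy struct stands
in for counters jemalloc keeps in TSD):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t thread_allocated_last_event; /* total bytes at last event */
        uint64_t stats_interval_last_event;   /* total at last stats event */
    } toy_tsd_t;

    /* Mirror of the fetch-elapsed pattern: diff the counters, catch up. */
    static uint64_t
    toy_stats_interval_fetch_elapsed(toy_tsd_t *tsd) {
        uint64_t last_event = tsd->thread_allocated_last_event;
        uint64_t elapsed = last_event - tsd->stats_interval_last_event;
        tsd->stats_interval_last_event = last_event;
        return elapsed;
    }

    int
    main(void) {
        toy_tsd_t tsd = {5000, 2000};
        printf("elapsed = %llu\n", /* prints 3000 */
            (unsigned long long)toy_stats_interval_fetch_elapsed(&tsd));
        return 0;
    }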

src/tcache.c (22 lines changed)

@@ -60,8 +60,14 @@ tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) {
     return TE_MIN_START_WAIT;
 }
 
-void
-tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
+static void
+tcache_event(tsd_t *tsd) {
+    tcache_t *tcache = tcache_get(tsd);
+    if (tcache == NULL) {
+        return;
+    }
+
+    tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
     szind_t binind = tcache_slow->next_gc_bin;
     bool is_small = (binind < SC_NBINS);
     cache_bin_t *cache_bin = &tcache->bins[binind];

@@ -110,6 +116,18 @@ tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
     }
 }
 
+void
+tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+    assert(elapsed == TE_INVALID_ELAPSED);
+    tcache_event(tsd);
+}
+
+void
+tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+    assert(elapsed == TE_INVALID_ELAPSED);
+    tcache_event(tsd);
+}
+
 void *
 tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
     tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind,
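
Note: tcache_event, now the module-internal driver behind both GC handlers,
visits one cache bin per event and advances tcache_slow->next_gc_bin, so the
bins are garbage-collected round-robin. A schematic, self-contained sketch of
that traversal (flushing elided; jemalloc's version also adapts per-bin fill
and flush behavior):

    #include <stdio.h>

    #define TOY_NBINS 4

    typedef struct {
        unsigned next_gc_bin; /* round-robin cursor over the cache bins */
    } toy_tcache_t;

    /* Visit one bin per event; wrap around after the last bin. */
    static void
    toy_tcache_gc_step(toy_tcache_t *tcache) {
        unsigned binind = tcache->next_gc_bin;
        printf("GC pass over bin %u\n", binind); /* flush logic elided */
        tcache->next_gc_bin = (binind + 1) % TOY_NBINS;
    }

    int
    main(void) {
        toy_tcache_t t = {0};
        for (int i = 0; i < 6; i++) {
            toy_tcache_gc_step(&t); /* visits bins 0,1,2,3,0,1 */
        }
        return 0;
    }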

src/thread_event.c

@@ -5,68 +5,59 @@
 #include "jemalloc/internal/thread_event.h"
 
 /*
- * Signatures for functions computing new / postponed event wait time. New
+ * Signatures for event specific functions. These functions should be defined
+ * by the modules owning each event. The signatures here verify that the
+ * definitions follow the right format.
+ *
+ * The first two are functions computing new / postponed event wait time. New
  * event wait time is the time till the next event if an event is currently
  * being triggered; postponed event wait time is the time till the next event
  * if an event should be triggered but needs to be postponed, e.g. when the TSD
  * is not nominal or during reentrancy.
  *
- * These event wait time computation functions should be defined by the modules
- * owning each event. The signatures here are used to verify that the
- * definitions follow the right format.
+ * The third is the event handler function, which is called whenever an event
+ * is triggered. The parameter is the elapsed time since the last time an
+ * event of the same type was triggered.
  */
 #define E(event, condition_unused, is_alloc_event_unused) \
     uint64_t event##_new_event_wait(tsd_t *tsd); \
-    uint64_t event##_postponed_event_wait(tsd_t *tsd);
+    uint64_t event##_postponed_event_wait(tsd_t *tsd); \
+    void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
 
 ITERATE_OVER_ALL_EVENTS
 #undef E
 
-/* Event handler function signatures. */
+/* Signatures for internal functions fetching elapsed time. */
 #define E(event, condition_unused, is_alloc_event_unused) \
-    static void event##_event_handler(tsd_t *tsd);
+    static uint64_t event##_fetch_elapsed(tsd_t *tsd);
 
 ITERATE_OVER_ALL_EVENTS
 #undef E
 
-/* Handler functions. */
-static void
-tcache_gc_event(tsd_t *tsd) {
-    tcache_t *tcache = tcache_get(tsd);
-    if (tcache != NULL) {
-        tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
-        tcache_event_hard(tsd, tcache_slow, tcache);
-    }
+static uint64_t
+tcache_gc_fetch_elapsed(tsd_t *tsd) {
+    return TE_INVALID_ELAPSED;
 }
 
-static void
-tcache_gc_event_handler(tsd_t *tsd) {
-    tcache_gc_event(tsd);
+static uint64_t
+tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
+    return TE_INVALID_ELAPSED;
 }
 
-static void
-tcache_gc_dalloc_event_handler(tsd_t *tsd) {
-    tcache_gc_event(tsd);
-}
-
-static void
-prof_sample_event_handler(tsd_t *tsd) {
+static uint64_t
+prof_sample_fetch_elapsed(tsd_t *tsd) {
     uint64_t last_event = thread_allocated_last_event_get(tsd);
     uint64_t last_sample_event = prof_sample_last_event_get(tsd);
     prof_sample_last_event_set(tsd, last_event);
-    if (prof_idump_accum(tsd_tsdn(tsd), last_event - last_sample_event)) {
-        prof_idump(tsd_tsdn(tsd));
-    }
+    return last_event - last_sample_event;
 }
 
-static void
-stats_interval_event_handler(tsd_t *tsd) {
+static uint64_t
+stats_interval_fetch_elapsed(tsd_t *tsd) {
     uint64_t last_event = thread_allocated_last_event_get(tsd);
     uint64_t last_stats_event = stats_interval_last_event_get(tsd);
     stats_interval_last_event_set(tsd, last_event);
-    if (stats_interval_accum(tsd, last_event - last_stats_event)) {
-        je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
-    }
+    return last_event - last_stats_event;
 }
 
 /* Per event facilities done. */
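
Note: the E list is an X-macro; every expansion of ITERATE_OVER_ALL_EVENTS
re-emits E once per registered event, so the first block above now declares
three functions per event. A compilable standalone demo of the pattern, with
a stubbed TSD type and a made-up two-event list (the real list lives in
jemalloc's thread_event.h):

    #include <stdint.h>

    typedef struct tsd_s tsd_t; /* stub for jemalloc's TSD type */

    /* Two-event stand-in for jemalloc's ITERATE_OVER_ALL_EVENTS. */
    #define ITERATE_OVER_ALL_EVENTS \
        E(tcache_gc, true, false) \
        E(prof_sample, true, true)

    /* Same shape as the new E above: three declarations per event. */
    #define E(event, condition_unused, is_alloc_event_unused) \
        uint64_t event##_new_event_wait(tsd_t *tsd); \
        uint64_t event##_postponed_event_wait(tsd_t *tsd); \
        void event##_event_handler(tsd_t *tsd, uint64_t elapsed);

    ITERATE_OVER_ALL_EVENTS
    #undef E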

@@ -286,7 +277,8 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
         if (is_alloc == alloc_event && condition && \
             is_##event##_triggered) { \
             assert(allow_event_trigger); \
-            event##_event_handler(tsd); \
+            uint64_t elapsed = event##_fetch_elapsed(tsd); \
+            event##_event_handler(tsd, elapsed); \
         }
 
     ITERATE_OVER_ALL_EVENTS
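
Note: after this change te_event_trigger is fully module-agnostic: for each
triggered event it first asks the event's fetch_elapsed function for the
elapsed value, then hands that value to the event's handler. A runnable
miniature of the fetch-then-dispatch control flow (all names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    demo_fetch_elapsed(void) {
        return 4096; /* pretend 4 KiB were allocated since the last event */
    }

    static void
    demo_event_handler(uint64_t elapsed) {
        printf("handling event, elapsed = %llu bytes\n",
            (unsigned long long)elapsed);
    }

    int
    main(void) {
        int is_demo_event_triggered = 1;
        if (is_demo_event_triggered) {
            /* Fetch first, then dispatch, as in the macro above. */
            uint64_t elapsed = demo_fetch_elapsed();
            demo_event_handler(elapsed);
        }
        return 0;
    }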