Extract out per event postponed wait time fetching

This commit is contained in:
Yinan Zhang 2020-04-16 13:33:56 -07:00
parent f72014d097
commit abd4674931
7 changed files with 37 additions and 5 deletions

View File

@@ -98,8 +98,10 @@ void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
/* Only accessed by thread event. */
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
bool prof_log_start(tsdn_t *tsdn, const char *filename);

View File

@@ -39,6 +39,7 @@ extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
/* Only accessed by thread event. */
uint64_t stats_interval_new_event_wait(tsd_t *tsd);
uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
/* Implements je_malloc_stats_print. */

View File

@@ -55,7 +55,9 @@ void tcache_assert_initialized(tcache_t *tcache);
/* Only accessed by thread event. */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
    tcache_t *tcache);

View File

@@ -561,6 +561,11 @@ prof_sample_new_event_wait(tsd_t *tsd) {
#endif
}
uint64_t
prof_sample_postponed_event_wait(tsd_t *tsd) {
/*
 * Wait time to apply when a sampling event fires but must be postponed
 * (e.g. non-nominal TSD or reentrancy): fall back to the minimum start
 * wait so the event is retried as soon as allowed.
 */
return TE_MIN_START_WAIT;
}
int
prof_getpid(void) {
#ifdef _WIN32

View File

@@ -1503,6 +1503,11 @@ stats_interval_new_event_wait(tsd_t *tsd) {
	return stats_interval_accum_batch;
}
uint64_t
stats_interval_postponed_event_wait(tsd_t *tsd) {
/*
 * Wait time for a stats-interval event that fired but has to be
 * postponed: use the minimum start wait so it is retried promptly.
 */
return TE_MIN_START_WAIT;
}
bool
stats_boot(void) {
	uint64_t stats_interval;

View File

@@ -45,11 +45,21 @@ tcache_gc_new_event_wait(tsd_t *tsd) {
	return TCACHE_GC_INCR_BYTES;
}
uint64_t
tcache_gc_postponed_event_wait(tsd_t *tsd) {
/*
 * Wait time for a tcache GC (alloc-driven) event that must be
 * postponed: minimum start wait, so the GC is retried soon.
 */
return TE_MIN_START_WAIT;
}
uint64_t
tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
	return TCACHE_GC_INCR_BYTES;
}
uint64_t
tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) {
/*
 * Wait time for a tcache GC (dalloc-driven) event that must be
 * postponed: minimum start wait, mirroring the alloc-driven variant.
 */
return TE_MIN_START_WAIT;
}
void
tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
	szind_t binind = tcache_slow->next_gc_bin;

View File

@@ -5,12 +5,19 @@
#include "jemalloc/internal/thread_event.h"

/*
 * Signatures for functions computing new / postponed event wait time. New
 * event wait time is the time till the next event if an event is currently
 * being triggered; postponed event wait time is the time till the next event
 * if an event should be triggered but needs to be postponed, e.g. when the TSD
 * is not nominal or during reentrancy.
 *
 * These event wait time computation functions should be defined by the modules
 * owning each event. The signatures here are used to verify that the
 * definitions follow the right format.
 */
#define E(event, condition_unused, is_alloc_event_unused)	\
	uint64_t event##_new_event_wait(tsd_t *tsd);		\
	uint64_t event##_postponed_event_wait(tsd_t *tsd);

ITERATE_OVER_ALL_EVENTS
#undef E
@@ -256,7 +263,7 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
		if (event_wait > accumbytes) {				\
			event_wait -= accumbytes;			\
		} else if (!allow_event_trigger) {			\
			event_wait = event##_postponed_event_wait(tsd);	\
		} else {						\
			is_##event##_triggered = true;			\
			event_wait = event##_new_event_wait(tsd);	\