From 733ae918f0d848a64e88e622e348749fe6756d89 Mon Sep 17 00:00:00 2001
From: Yinan Zhang
Date: Wed, 15 Apr 2020 10:49:08 -0700
Subject: [PATCH] Extract out per event new wait time fetching

---
 include/jemalloc/internal/prof_externs.h   |  5 +++--
 include/jemalloc/internal/stats.h          |  3 ++-
 include/jemalloc/internal/tcache_externs.h |  8 ++++++--
 src/prof.c                                 | 17 +++++++----------
 src/stats.c                                |  2 +-
 src/tcache.c                               | 10 ++++++++++
 src/thread_event.c                         | 24 +++++++++++++++++------
 7 files changed, 48 insertions(+), 21 deletions(-)

diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h
index a6b659c1..2284ae65 100644
--- a/include/jemalloc/internal/prof_externs.h
+++ b/include/jemalloc/internal/prof_externs.h
@@ -74,7 +74,6 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
 #endif
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 void prof_idump(tsdn_t *tsdn);
 bool prof_mdump(tsd_t *tsd, const char *filename);
 void prof_gdump(tsdn_t *tsdn);
@@ -99,7 +98,9 @@ void prof_prefork0(tsdn_t *tsdn);
 void prof_prefork1(tsdn_t *tsdn);
 void prof_postfork_parent(tsdn_t *tsdn);
 void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(tsd_t *tsd);
+/* Only accessed by thread event. */
+uint64_t prof_sample_new_event_wait(tsd_t *tsd);
+bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 
 bool prof_log_start(tsdn_t *tsdn, const char *filename);
 bool prof_log_stop(tsdn_t *tsdn);
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 7cd14302..42c321e5 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -37,7 +37,8 @@ extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
 #define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
 #define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
 
-uint64_t stats_interval_accum_batch_size(void);
+/* Only accessed by thread event. */
+uint64_t stats_interval_new_event_wait(tsd_t *tsd);
 bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
 
 /* Implements je_malloc_stats_print. */
diff --git a/include/jemalloc/internal/tcache_externs.h b/include/jemalloc/internal/tcache_externs.h
index 7ca38d68..7fd730d6 100644
--- a/include/jemalloc/internal/tcache_externs.h
+++ b/include/jemalloc/internal/tcache_externs.h
@@ -26,8 +26,6 @@ extern cache_bin_info_t *tcache_bin_info;
 extern tcaches_t *tcaches;
 
 size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
-    tcache_t *tcache);
 void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, bool *tcache_success);
 
@@ -55,4 +53,10 @@ bool tsd_tcache_enabled_data_init(tsd_t *tsd);
 
 void tcache_assert_initialized(tcache_t *tcache);
 
+/* Only accessed by thread event. */
+uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
+void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
+    tcache_t *tcache);
+
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
diff --git a/src/prof.c b/src/prof.c
index 2e1d7689..94055855 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -518,16 +518,11 @@ prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
  * (e.g.
  * -mno-sse) in order for the workaround to be complete.
  */
-void
-prof_sample_threshold_update(tsd_t *tsd) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
 #ifdef JEMALLOC_PROF
-	if (!config_prof) {
-		return;
-	}
-
 	if (lg_prof_sample == 0) {
-		te_prof_sample_event_update(tsd, TE_MIN_START_WAIT);
-		return;
+		return TE_MIN_START_WAIT;
 	}
 
 	/*
@@ -557,10 +552,12 @@ prof_sample_threshold_update(tsd_t *tsd) {
 	 */
 	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
 	double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
-	uint64_t bytes_until_sample = (uint64_t)(log(u) /
+	return (uint64_t)(log(u) /
 	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) +
 	    (uint64_t)1U;
-	te_prof_sample_event_update(tsd, bytes_until_sample);
+#else
+	not_reached();
+	return TE_MAX_START_WAIT;
 #endif
 }
 
diff --git a/src/stats.c b/src/stats.c
index 56d3b489..9d13f596 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -1499,7 +1499,7 @@ stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
 }
 
 uint64_t
-stats_interval_accum_batch_size(void) {
+stats_interval_new_event_wait(tsd_t *tsd) {
 	return stats_interval_accum_batch;
 }
 
diff --git a/src/tcache.c b/src/tcache.c
index 63e1a4d5..cba2ea72 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -40,6 +40,16 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
 	return arena_salloc(tsdn, ptr);
 }
 
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
 void
 tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
 	szind_t binind = tcache_slow->next_gc_bin;
diff --git a/src/thread_event.c b/src/thread_event.c
index c96dea6e..99467ee3 100644
--- a/src/thread_event.c
+++ b/src/thread_event.c
@@ -4,6 +4,17 @@
 
 #include "jemalloc/internal/thread_event.h"
 
+/*
+ * Signatures for functions computing new event wait time.  The functions
+ * should be defined by the modules owning each event.  The signatures here are
+ * used to verify that the definitions are in the right shape.
+ */
+#define E(event, condition_unused, is_alloc_event_unused)		\
+uint64_t event##_new_event_wait(tsd_t *tsd);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
 /* TSD event init function signatures. */
 #define E(event, condition_unused, is_alloc_event_unused)		\
 static void te_tsd_##event##_event_init(tsd_t *tsd);
@@ -22,26 +33,29 @@ ITERATE_OVER_ALL_EVENTS
 static void
 te_tsd_tcache_gc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_new_event_wait(tsd);
+	te_tcache_gc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_tcache_gc_dalloc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_dalloc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_dalloc_new_event_wait(tsd);
+	te_tcache_gc_dalloc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_prof_sample_event_init(tsd_t *tsd) {
 	assert(config_prof && opt_prof);
-	prof_sample_threshold_update(tsd);
+	uint64_t wait = prof_sample_new_event_wait(tsd);
+	te_prof_sample_event_update(tsd, wait);
 }
 
 static void
 te_tsd_stats_interval_event_init(tsd_t *tsd) {
 	assert(opt_stats_interval >= 0);
-	uint64_t interval = stats_interval_accum_batch_size();
-	te_stats_interval_event_update(tsd, interval);
+	uint64_t wait = stats_interval_new_event_wait(tsd);
+	te_stats_interval_event_update(tsd, wait);
 }
 
 /* Handler functions. */
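
Note (not part of the patch): the thread_event.c hunk above relies on re-declaring every event's <event>_new_event_wait() through the event list, so a definition with the wrong shape fails to compile. Below is a minimal standalone sketch of that X-macro pattern, assuming a hypothetical two-event list, an opaque tsd_t stand-in, and toy return values; jemalloc's real ITERATE_OVER_ALL_EVENTS and tsd_t live in thread_event.h and the tsd module.

/*
 * Standalone sketch: each module owning an event defines
 * <event>_new_event_wait(), and iterating the event list re-declares
 * every such function, so a mismatched definition breaks the build.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct tsd_s tsd_t;	/* opaque stand-in for jemalloc's tsd_t */

/* Hypothetical two-event list; the real one lives in thread_event.h. */
#define ITERATE_OVER_ALL_EVENTS						\
    E(tcache_gc, always, false)						\
    E(stats_interval, always, true)

/* Declare one wait-time fetcher per event, all with the same shape. */
#define E(event, condition_unused, is_alloc_event_unused)		\
uint64_t event##_new_event_wait(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E

/* Toy definitions standing in for the owning modules. */
uint64_t
tcache_gc_new_event_wait(tsd_t *tsd) {
	(void)tsd;
	return 65536;		/* stand-in for TCACHE_GC_INCR_BYTES */
}

uint64_t
stats_interval_new_event_wait(tsd_t *tsd) {
	(void)tsd;
	return 1 << 20;		/* stand-in for stats_interval_accum_batch */
}

int
main(void) {
	/* An init path can now fetch every event's wait time uniformly. */
#define E(event, condition_unused, is_alloc_event_unused)		\
	printf(#event ": %llu\n",					\
	    (unsigned long long)event##_new_event_wait(NULL));
	ITERATE_OVER_ALL_EVENTS
#undef E
	return 0;
}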