Extract out per event new wait time fetching

Yinan Zhang 2020-04-15 10:49:08 -07:00
parent 1e2524e15a
commit 733ae918f0
7 changed files with 48 additions and 21 deletions
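
Note on the overall shape of the change: before this commit, each event module pushed its initial wait time into the thread-event machinery itself (e.g. prof_sample_threshold_update() called te_prof_sample_event_update() directly). After it, each module merely exposes a <event>_new_event_wait(tsd) fetcher that returns the wait time, and the generic thread-event code does the fetching and arming. A minimal sketch of the resulting split, using names taken from this diff:

/* Event module: owns the policy for how long to wait. */
uint64_t
tcache_gc_new_event_wait(tsd_t *tsd) {
	return TCACHE_GC_INCR_BYTES;
}

/* Thread-event module: fetches the wait and arms the event. */
static void
te_tsd_tcache_gc_event_init(tsd_t *tsd) {
	uint64_t wait = tcache_gc_new_event_wait(tsd);
	te_tcache_gc_event_update(tsd, wait);
}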

View File

@@ -74,7 +74,6 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
 #endif
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 void prof_idump(tsdn_t *tsdn);
 bool prof_mdump(tsd_t *tsd, const char *filename);
 void prof_gdump(tsdn_t *tsdn);
@@ -99,7 +98,9 @@ void prof_prefork0(tsdn_t *tsdn);
 void prof_prefork1(tsdn_t *tsdn);
 void prof_postfork_parent(tsdn_t *tsdn);
 void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(tsd_t *tsd);
+/* Only accessed by thread event. */
+uint64_t prof_sample_new_event_wait(tsd_t *tsd);
+bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 
 bool prof_log_start(tsdn_t *tsdn, const char *filename);
 bool prof_log_stop(tsdn_t *tsdn);

View File

@@ -37,7 +37,8 @@ extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
 #define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
 #define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
 
-uint64_t stats_interval_accum_batch_size(void);
+/* Only accessed by thread event. */
+uint64_t stats_interval_new_event_wait(tsd_t *tsd);
 bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
 
 /* Implements je_malloc_stats_print. */

View File

@@ -26,8 +26,6 @@ extern cache_bin_info_t *tcache_bin_info;
 extern tcaches_t *tcaches;
 
 size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
-    tcache_t *tcache);
 void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, bool *tcache_success);
@@ -55,4 +53,10 @@ bool tsd_tcache_enabled_data_init(tsd_t *tsd);
 void tcache_assert_initialized(tcache_t *tcache);
 
+/* Only accessed by thread event. */
+uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
+void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
+    tcache_t *tcache);
+
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */

View File

@@ -518,16 +518,11 @@ prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
  * (e.g.
  * -mno-sse) in order for the workaround to be complete.
  */
-void
-prof_sample_threshold_update(tsd_t *tsd) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
 #ifdef JEMALLOC_PROF
-	if (!config_prof) {
-		return;
-	}
-
 	if (lg_prof_sample == 0) {
-		te_prof_sample_event_update(tsd, TE_MIN_START_WAIT);
-		return;
+		return TE_MIN_START_WAIT;
 	}
 
 	/*
@@ -557,10 +552,12 @@ prof_sample_threshold_update(tsd_t *tsd) {
 	 */
 	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
 	double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
-	uint64_t bytes_until_sample = (uint64_t)(log(u) /
+	return (uint64_t)(log(u) /
 	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
 	    + (uint64_t)1U;
-
-	te_prof_sample_event_update(tsd, bytes_until_sample);
+#else
+	not_reached();
+	return TE_MAX_START_WAIT;
 #endif
 }
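
Note on the computation above: the return value is an inverse-transform draw from a geometric distribution. With success probability p = 1/2^lg_prof_sample and u uniform in (0, 1], floor(log(u) / log(1 - p)) + 1 is geometrically distributed with mean 1/p, so a sample fires on average once per 2^lg_prof_sample allocated bytes; 9007199254740992.0 is 2^53, matching the 53 random bits drawn. A self-contained sketch of the same draw follows; drand48() stands in for jemalloc's tsd-local PRNG here, so it is illustrative rather than bit-identical:

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Geometric variate with mean 2^lg_sample bytes between samples.
 * Assumes 1 <= lg_sample < 64; the real code special-cases
 * lg_prof_sample == 0, as the first hunk above shows.
 */
static uint64_t
sample_wait(unsigned lg_sample) {
	uint64_t r = (uint64_t)(drand48() * 9007199254740992.0); /* 53 bits */
	double u = (r == 0U) ? 1.0 : (double)r * (1.0 / 9007199254740992.0);
	return (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_sample)))) + 1U;
}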

View File

@@ -1499,7 +1499,7 @@ stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
 }
 
 uint64_t
-stats_interval_accum_batch_size(void) {
+stats_interval_new_event_wait(tsd_t *tsd) {
 	return stats_interval_accum_batch;
 }

View File

@@ -40,6 +40,16 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
 	return arena_salloc(tsdn, ptr);
 }
 
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
 void
 tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
 	szind_t binind = tcache_slow->next_gc_bin;

View File

@@ -4,6 +4,17 @@
 
 #include "jemalloc/internal/thread_event.h"
 
+/*
+ * Signatures for functions computing new event wait time. The functions
+ * should be defined by the modules owning each event. The signatures here are
+ * used to verify that the definitions are in the right shape.
+ */
+#define E(event, condition_unused, is_alloc_event_unused)	\
+    uint64_t event##_new_event_wait(tsd_t *tsd);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
 /* TSD event init function signatures. */
 #define E(event, condition_unused, is_alloc_event_unused)	\
 static void te_tsd_##event##_event_init(tsd_t *tsd);
@@ -22,26 +33,29 @@ ITERATE_OVER_ALL_EVENTS
 
 static void
 te_tsd_tcache_gc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_new_event_wait(tsd);
+	te_tcache_gc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_tcache_gc_dalloc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_dalloc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_dalloc_new_event_wait(tsd);
+	te_tcache_gc_dalloc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_prof_sample_event_init(tsd_t *tsd) {
 	assert(config_prof && opt_prof);
-	prof_sample_threshold_update(tsd);
+	uint64_t wait = prof_sample_new_event_wait(tsd);
+	te_prof_sample_event_update(tsd, wait);
 }
 
 static void
 te_tsd_stats_interval_event_init(tsd_t *tsd) {
 	assert(opt_stats_interval >= 0);
-	uint64_t interval = stats_interval_accum_batch_size();
-	te_stats_interval_event_update(tsd, interval);
+	uint64_t wait = stats_interval_new_event_wait(tsd);
+	te_stats_interval_event_update(tsd, wait);
 }
 
 /* Handler functions. */
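
Note on the new E()/ITERATE_OVER_ALL_EVENTS block at the top of this file: it is an X-macro. ITERATE_OVER_ALL_EVENTS expands E(...) once per registered event, so the temporary definition of E stamps out one prototype per event before being #undef'd. Assuming the event list covers the four events touched by this commit (tcache_gc, tcache_gc_dalloc, prof_sample, stats_interval; the list itself presumably lives in thread_event.h, which this file includes), the preprocessed result is roughly:

uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
uint64_t stats_interval_new_event_wait(tsd_t *tsd);

Since this translation unit also sees each module's own header, any definition that drifts from the uint64_t (*)(tsd_t *) shape becomes a compile error, which is the "verify that the definitions are in the right shape" check the comment describes.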