Extract out per event new wait time fetching

Yinan Zhang 2020-04-15 10:49:08 -07:00
parent 1e2524e15a
commit 733ae918f0
7 changed files with 48 additions and 21 deletions
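The refactor is uniform across all four thread events: each owning module now exposes a pure <event>_new_event_wait() that only returns the wait time, and thread_event.c performs the te_*_event_update() side effect itself. Below is a compilable sketch of the before/after shape, using hypothetical stand-ins (the one-field tsd_t and compute_wait() are illustrative, not jemalloc's real definitions):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for jemalloc's tsd_t and per-event state. */
typedef struct { uint64_t prof_sample_event_wait; } tsd_t;

static uint64_t compute_wait(tsd_t *tsd) { (void)tsd; return 1 << 19; }

static void te_prof_sample_event_update(tsd_t *tsd, uint64_t wait) {
    tsd->prof_sample_event_wait = wait;
}

/* Before: the owning module computed the wait AND mutated event state. */
static void prof_sample_threshold_update(tsd_t *tsd) {
    te_prof_sample_event_update(tsd, compute_wait(tsd));
}

/* After: the module only computes and returns the wait... */
static uint64_t prof_sample_new_event_wait(tsd_t *tsd) {
    return compute_wait(tsd);
}

/* ...and the thread-event module installs it, uniformly for every event. */
static void te_tsd_prof_sample_event_init(tsd_t *tsd) {
    uint64_t wait = prof_sample_new_event_wait(tsd);
    te_prof_sample_event_update(tsd, wait);
}

int main(void) {
    tsd_t tsd = {0};
    prof_sample_threshold_update(&tsd);  /* old shape */
    te_tsd_prof_sample_event_init(&tsd); /* new shape, same end state */
    printf("wait = %llu\n", (unsigned long long)tsd.prof_sample_event_wait);
    return 0;
}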


@@ -74,7 +74,6 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
 #endif
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 void prof_idump(tsdn_t *tsdn);
 bool prof_mdump(tsd_t *tsd, const char *filename);
 void prof_gdump(tsdn_t *tsdn);
@@ -99,7 +98,9 @@ void prof_prefork0(tsdn_t *tsdn);
 void prof_prefork1(tsdn_t *tsdn);
 void prof_postfork_parent(tsdn_t *tsdn);
 void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(tsd_t *tsd);
 
+/* Only accessed by thread event. */
+uint64_t prof_sample_new_event_wait(tsd_t *tsd);
+bool prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes);
 bool prof_log_start(tsdn_t *tsdn, const char *filename);
 bool prof_log_stop(tsdn_t *tsdn);


@@ -37,7 +37,8 @@ extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
 #define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
 #define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
 
-uint64_t stats_interval_accum_batch_size(void);
+/* Only accessed by thread event. */
+uint64_t stats_interval_new_event_wait(tsd_t *tsd);
 bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
 
 /* Implements je_malloc_stats_print. */


@@ -26,8 +26,6 @@ extern cache_bin_info_t *tcache_bin_info;
 extern tcaches_t *tcaches;
 
 size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
-    tcache_t *tcache);
 void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, bool *tcache_success);
 
@@ -55,4 +53,10 @@ bool tsd_tcache_enabled_data_init(tsd_t *tsd);
 void tcache_assert_initialized(tcache_t *tcache);
 
+/* Only accessed by thread event. */
+uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
+
+void tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow,
+    tcache_t *tcache);
 
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */


@@ -518,16 +518,11 @@ prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
  * (e.g.
  * -mno-sse) in order for the workaround to be complete.
  */
-void
-prof_sample_threshold_update(tsd_t *tsd) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
 #ifdef JEMALLOC_PROF
-    if (!config_prof) {
-        return;
-    }
-
     if (lg_prof_sample == 0) {
-        te_prof_sample_event_update(tsd, TE_MIN_START_WAIT);
-        return;
+        return TE_MIN_START_WAIT;
     }
 
     /*
@@ -557,10 +552,12 @@ prof_sample_threshold_update(tsd_t *tsd) {
      */
     uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
     double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
-    uint64_t bytes_until_sample = (uint64_t)(log(u) /
+    return (uint64_t)(log(u) /
         log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
         + (uint64_t)1U;
-    te_prof_sample_event_update(tsd, bytes_until_sample);
 #else
     not_reached();
+    return TE_MAX_START_WAIT;
 #endif
 }
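For context on the math kept intact above: r is a uniform 53-bit random integer, so u is uniform in (0, 1], and the returned value is an inverse-transform sample of a geometric distribution with success probability 2^-lg_prof_sample, giving a mean sampling interval of about 2^lg_prof_sample bytes. A standalone sketch of the computation, where rand() is a deliberately crude stand-in for jemalloc's prng_lg_range_u64 (any uniform 53-bit source plays the same role):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
sample_interval(unsigned lg_sample) {
    /* Uniform 53-bit integer; 2^53 == 9007199254740992. */
    uint64_t r = (((uint64_t)rand() << 42) ^ ((uint64_t)rand() << 21) ^
        (uint64_t)rand()) & (((uint64_t)1 << 53) - 1);
    /* Map to u in (0, 1]; r == 0 maps to 1.0, as in the source above. */
    double u = (r == 0) ? 1.0 : (double)r * (1.0 / 9007199254740992.0);
    /*
     * Inverse-transform sample of a geometric distribution with success
     * probability p = 2^-lg_sample; the mean interval is ~2^lg_sample.
     */
    return (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1 << lg_sample)))) + 1;
}

int main(void) {
    enum { N = 200000, LG = 19 };  /* 19 is opt.lg_prof_sample's default */
    double sum = 0.0;
    for (int i = 0; i < N; i++) {
        sum += (double)sample_interval(LG);
    }
    /* The empirical mean should land near 2^19 == 524288. */
    printf("mean interval: %.0f (expect ~%u)\n", sum / N, 1U << LG);
    return 0;  /* build with -lm */
}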


@@ -1499,7 +1499,7 @@ stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
 }
 
 uint64_t
-stats_interval_accum_batch_size(void) {
+stats_interval_new_event_wait(tsd_t *tsd) {
     return stats_interval_accum_batch;
 }
 


@@ -40,6 +40,16 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
     return arena_salloc(tsdn, ptr);
 }
 
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+    return TCACHE_GC_INCR_BYTES;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+    return TCACHE_GC_INCR_BYTES;
+}
+
 void
 tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
     szind_t binind = tcache_slow->next_gc_bin;


@@ -4,6 +4,17 @@
 #include "jemalloc/internal/thread_event.h"
 
+/*
+ * Signatures for functions computing new event wait time. The functions
+ * should be defined by the modules owning each event. The signatures here are
+ * used to verify that the definitions are in the right shape.
+ */
+#define E(event, condition_unused, is_alloc_event_unused) \
+	uint64_t event##_new_event_wait(tsd_t *tsd);
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
 /* TSD event init function signatures. */
 #define E(event, condition_unused, is_alloc_event_unused) \
 	static void te_tsd_##event##_event_init(tsd_t *tsd);
@@ -22,26 +33,29 @@ ITERATE_OVER_ALL_EVENTS
 static void
 te_tsd_tcache_gc_event_init(tsd_t *tsd) {
     assert(TCACHE_GC_INCR_BYTES > 0);
-    te_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+    uint64_t wait = tcache_gc_new_event_wait(tsd);
+    te_tcache_gc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_tcache_gc_dalloc_event_init(tsd_t *tsd) {
     assert(TCACHE_GC_INCR_BYTES > 0);
-    te_tcache_gc_dalloc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+    uint64_t wait = tcache_gc_dalloc_new_event_wait(tsd);
+    te_tcache_gc_dalloc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_prof_sample_event_init(tsd_t *tsd) {
     assert(config_prof && opt_prof);
-    prof_sample_threshold_update(tsd);
+    uint64_t wait = prof_sample_new_event_wait(tsd);
+    te_prof_sample_event_update(tsd, wait);
 }
 
 static void
 te_tsd_stats_interval_event_init(tsd_t *tsd) {
     assert(opt_stats_interval >= 0);
-    uint64_t interval = stats_interval_accum_batch_size();
-    te_stats_interval_event_update(tsd, interval);
+    uint64_t wait = stats_interval_new_event_wait(tsd);
+    te_stats_interval_event_update(tsd, wait);
 }
 
 /* Handler functions. */
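A note on the E() machinery above: ITERATE_OVER_ALL_EVENTS is an X-macro list, so re-#defining E to expand to a prototype stamps out one uint64_t <event>_new_event_wait(tsd_t *) declaration per event, and a module whose definition drifts from that shape becomes a compile-time conflicting-types error rather than a silent mismatch. A minimal self-contained sketch of the trick; the three-entry event list here is hypothetical, and the real E() also takes condition and is-alloc parameters, as seen in the hunks above:

#include <stdint.h>
#include <stdio.h>

typedef struct tsd_s tsd_t;  /* stand-in; an opaque handle suffices here */

/* Hypothetical event list; jemalloc's real one lives in thread_event.h. */
#define ITERATE_OVER_ALL_EVENTS \
    E(tcache_gc)                \
    E(prof_sample)              \
    E(stats_interval)

/*
 * Stamp out one prototype per event.  If a module later defines, say,
 * prof_sample_new_event_wait with a different signature, the compiler
 * reports the conflict at build time.
 */
#define E(event) uint64_t event##_new_event_wait(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E

/* Example definitions, as the owning modules would provide them
 * (return values are arbitrary illustrative constants). */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd) { (void)tsd; return 65536; }
uint64_t prof_sample_new_event_wait(tsd_t *tsd) { (void)tsd; return 1 << 19; }
uint64_t stats_interval_new_event_wait(tsd_t *tsd) { (void)tsd; return 4096; }

int main(void) {
    printf("tcache_gc wait: %llu\n",
        (unsigned long long)tcache_gc_new_event_wait(NULL));
    return 0;
}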