Extract out per event new wait time fetching

Yinan Zhang
2020-04-15 10:49:08 -07:00
parent 1e2524e15a
commit 733ae918f0
7 changed files with 48 additions and 21 deletions

@@ -518,16 +518,11 @@ prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
  * (e.g.
  * -mno-sse) in order for the workaround to be complete.
  */
-void
-prof_sample_threshold_update(tsd_t *tsd) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
 #ifdef JEMALLOC_PROF
-	if (!config_prof) {
-		return;
-	}
-
 	if (lg_prof_sample == 0) {
-		te_prof_sample_event_update(tsd, TE_MIN_START_WAIT);
-		return;
+		return TE_MIN_START_WAIT;
 	}
 
 	/*
@@ -557,10 +552,12 @@ prof_sample_threshold_update(tsd_t *tsd) {
 	 */
 	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
 	double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
-	uint64_t bytes_until_sample = (uint64_t)(log(u) /
+	return (uint64_t)(log(u) /
 	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
 	    + (uint64_t)1U;
-	te_prof_sample_event_update(tsd, bytes_until_sample);
 #else
 	not_reached();
+	return TE_MAX_START_WAIT;
 #endif
 }
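
The code above draws the new wait as a geometric random variable with mean 2^lg_prof_sample bytes: r is a 53-bit uniform draw, u = r / 2^53 maps it into (0, 1] (9007199254740992 is 2^53), and log(u) / log(1 - p) inverts the geometric CDF for per-byte sampling probability p = 2^-lg_prof_sample. A self-contained sketch of the same math, with hypothetical names and stdlib rand() as a crude stand-in for jemalloc's per-thread PRNG:

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical illustration; mean wait is 2^lg_sample bytes. */
static uint64_t
sample_wait_sketch(unsigned lg_sample) {
	if (lg_sample == 0) {
		return 1; /* the code above short-circuits to TE_MIN_START_WAIT */
	}
	/* Crude 53-bit uniform draw (jemalloc uses prng_lg_range_u64(..., 53)). */
	uint64_t r = (((uint64_t)rand() << 31) ^ (uint64_t)rand())
	    & (((uint64_t)1 << 53) - 1);
	/* Map into (0, 1]; 9007199254740992.0 is 2^53. */
	double u = (r == 0) ? 1.0 : (double)r * (1.0 / 9007199254740992.0);
	/* Inverse CDF of the geometric distribution, p = 2^-lg_sample. */
	return (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1 << lg_sample)))) + 1;
}

int
main(void) {
	/* With lg_sample = 19 (jemalloc's default), draws average 512 KiB apart. */
	printf("%llu\n", (unsigned long long)sample_wait_sketch(19));
	return 0;
}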

@@ -1499,7 +1499,7 @@ stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
 }
 
 uint64_t
-stats_interval_accum_batch_size(void) {
+stats_interval_new_event_wait(tsd_t *tsd) {
 	return stats_interval_accum_batch;
 }

@@ -40,6 +40,16 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
 	return arena_salloc(tsdn, ptr);
 }
 
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+	return TCACHE_GC_INCR_BYTES;
+}
+
 void
 tcache_event_hard(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache) {
 	szind_t binind = tcache_slow->next_gc_bin;

@@ -4,6 +4,17 @@
 #include "jemalloc/internal/thread_event.h"
 
+/*
+ * Signatures for functions computing new event wait time.  The functions
+ * should be defined by the modules owning each event.  The signatures here are
+ * used to verify that the definitions are in the right shape.
+ */
+#define E(event, condition_unused, is_alloc_event_unused)		\
+	uint64_t event##_new_event_wait(tsd_t *tsd);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
 /* TSD event init function signatures. */
 #define E(event, condition_unused, is_alloc_event_unused)		\
 	static void te_tsd_##event##_event_init(tsd_t *tsd);
 
@@ -22,26 +33,29 @@ ITERATE_OVER_ALL_EVENTS
 static void
 te_tsd_tcache_gc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_new_event_wait(tsd);
+	te_tcache_gc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_tcache_gc_dalloc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
-	te_tcache_gc_dalloc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	uint64_t wait = tcache_gc_dalloc_new_event_wait(tsd);
+	te_tcache_gc_dalloc_event_update(tsd, wait);
 }
 
 static void
 te_tsd_prof_sample_event_init(tsd_t *tsd) {
 	assert(config_prof && opt_prof);
-	prof_sample_threshold_update(tsd);
+	uint64_t wait = prof_sample_new_event_wait(tsd);
+	te_prof_sample_event_update(tsd, wait);
 }
 
 static void
 te_tsd_stats_interval_event_init(tsd_t *tsd) {
 	assert(opt_stats_interval >= 0);
-	uint64_t interval = stats_interval_accum_batch_size();
-	te_stats_interval_event_update(tsd, interval);
+	uint64_t wait = stats_interval_new_event_wait(tsd);
+	te_stats_interval_event_update(tsd, wait);
 }
 
 /* Handler functions. */
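
The E(...) / ITERATE_OVER_ALL_EVENTS pairing in the two hunks above is an X-macro table: ITERATE_OVER_ALL_EVENTS expands E once per event row, so the first expansion stamps out one uint64_t <event>_new_event_wait(tsd_t *) prototype per event, and the compiler rejects any owning module whose definition drifts from that shape. A reduced sketch of the pattern, using a hypothetical two-event table rather than jemalloc's real event list:

#include <stdint.h>
#include <stdio.h>

typedef struct tsd_s tsd_t; /* opaque stand-in for jemalloc's tsd_t */

/* The table: one E(...) row per event; unused columns are simply ignored. */
#define ITERATE_OVER_ALL_EVENTS						\
	E(tcache_gc, always, false)					\
	E(prof_sample, config_prof, true)

/* Expansion: declare uint64_t <event>_new_event_wait(tsd_t *) per row. */
#define E(event, condition_unused, is_alloc_event_unused)		\
	uint64_t event##_new_event_wait(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E

/* Definitions (normally in each owning module) must match the shape above. */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd) { (void)tsd; return 1024; }
uint64_t prof_sample_new_event_wait(tsd_t *tsd) { (void)tsd; return 512; }

int
main(void) {
	printf("%llu\n", (unsigned long long)tcache_gc_new_event_wait(NULL));
	return 0;
}

Redefining E and re-expanding the same table is how thread_event.c also generates the te_tsd_*_event_init declarations above without repeating the event list.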