Refactor and optimize prof sampling initialization.
Makes the prof sample prng use the tsd prng_state. This allows us to properly initialize the sample interval event, without having to create tdata. As a result, tdata will be created on demand (when a thread reaches the sample interval bytes allocated), instead of on the first allocation.
This commit is contained in:
Changed files:
 src/prof.c | 13 lines changed
@@ -149,7 +149,7 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
 		 */
 		tdata = prof_tdata_get(tsd, true);
 		if (tdata != NULL) {
-			prof_sample_threshold_update(tdata);
+			prof_sample_threshold_update(tsd);
 		}
 	}
@@ -469,14 +469,12 @@ prof_tdata_mutex_choose(uint64_t thr_uid) {
  * -mno-sse) in order for the workaround to be complete.
  */
 void
-prof_sample_threshold_update(prof_tdata_t *tdata) {
+prof_sample_threshold_update(tsd_t *tsd) {
 #ifdef JEMALLOC_PROF
 	if (!config_prof) {
 		return;
 	}
 
-	tsd_t *tsd = tsd_fetch();
-
 	if (lg_prof_sample == 0) {
 		thread_prof_sample_event_update(tsd,
 		    THREAD_EVENT_MIN_START_WAIT);
@@ -501,13 +499,12 @@ prof_sample_threshold_update(prof_tdata_t *tdata) {
 	 * pp 500
 	 * (http://luc.devroye.org/rnbookindex.html)
 	 */
-	uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
+	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
 	double u = (double)r * (1.0/9007199254740992.0L);
 	uint64_t bytes_until_sample = (uint64_t)(log(u) /
 	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
 	    + (uint64_t)1U;
 	thread_prof_sample_event_update(tsd, bytes_until_sample);
-
 #endif
 }
 
@@ -810,7 +807,7 @@ prof_thr_uid_alloc(tsdn_t *tsdn) {
 prof_tdata_t *
 prof_tdata_init(tsd_t *tsd) {
 	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
-	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
+	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)), false);
 }
 
 static char *
@@ -846,7 +843,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
 
 	prof_tdata_detach(tsd, tdata);
 	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
-	    active);
+	    active, true);
 }
 
 void
@@ -1198,7 +1198,7 @@ prof_bt_keycomp(const void *k1, const void *k2) {
 
 prof_tdata_t *
 prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
-    char *thread_name, bool active) {
+    char *thread_name, bool active, bool reset_interval) {
 	assert(tsd_reentrancy_level_get(tsd) == 0);
 
 	prof_tdata_t *tdata;
@@ -1227,8 +1227,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tdata->prng_state = (uint64_t)(uintptr_t)tdata;
|
||||
prof_sample_threshold_update(tdata);
|
||||
if (reset_interval) {
|
||||
prof_sample_threshold_update(tsd);
|
||||
}
|
||||
|
||||
tdata->enq = false;
|
||||
tdata->enq_idump = false;
|
||||
|
@@ -34,7 +34,7 @@ tsd_thread_tcache_gc_event_init(tsd_t *tsd) {
 static void
 tsd_thread_prof_sample_event_init(tsd_t *tsd) {
 	assert(config_prof && opt_prof);
-	/* Do not set sample interval until the first allocation. */
+	prof_sample_threshold_update(tsd);
 }
 
 static void
|
Reference in New Issue
Block a user