Move sampling init into prof_alloc_prep().

Move prof_sample_threshold initialization into prof_alloc_prep(),
before using it to decide whether to capture a backtrace.
commit 9df0215f9b (parent ca6bd4f1c8)
Author: Jason Evans
Date: 2010-03-03 11:53:11 -08:00


@@ -511,6 +511,24 @@ prof_lookup(prof_bt_t *bt)
 	return (ret);
 }
 
+static inline void
+prof_sample_threshold_update(void)
+{
+	uint64_t r;
+	double u;
+
+	/*
+	 * Compute prof_sample_threshold as a geometrically distributed random
+	 * variable with mean (2^opt_lg_prof_sample).
+	 */
+	prn64(r, 53, prof_sample_prn_state, (uint64_t)1125899906842625LLU,
+	    1058392653243283975);
+	u = (double)r * (1.0/9007199254740992.0L);
+	prof_sample_threshold = (uint64_t)(log(u) /
+	    log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+	    + (uint64_t)1U;
+}
+
 prof_thr_cnt_t *
 prof_alloc_prep(size_t size)
 {
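The constants in the moved function decode as follows: prn64() draws 53 pseudorandom bits r, u = r/2^53 is then uniform in [0, 1), and inverting the geometric CDF turns u into a threshold with mean 2^opt_lg_prof_sample bytes. A minimal standalone sketch of the same inversion method, assuming a plain 64-bit LCG in place of jemalloc's prn64() macro (the multiplier and increment are copied from the patch; geometric_sample() is an illustrative name, not jemalloc's):

#include <math.h>
#include <stdint.h>

static uint64_t prn_state = 42;	/* seeded per thread in the real code */

static uint64_t
geometric_sample(unsigned lg_mean)
{
	uint64_t r;
	double u, p;

	/* One LCG step; keep the 53 high bits, which are the best mixed. */
	prn_state = prn_state * 1125899906842625LLU + 1058392653243283975LLU;
	r = prn_state >> (64 - 53);
	u = (double)r * (1.0 / 9007199254740992.0);	/* r / 2^53, in [0, 1) */

	/*
	 * Inversion method: for u uniform in (0, 1),
	 * floor(log(u) / log(1 - p)) + 1 is Geometric(p), so with
	 * p = 1/2^lg_mean the result has mean 2^lg_mean.  u == 0
	 * (probability 2^-53) yields an effectively infinite threshold.
	 */
	p = 1.0 / (double)((uint64_t)1U << lg_mean);
	return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}

Drawing the threshold from a geometric distribution, rather than sampling every 2^opt_lg_prof_sample bytes exactly, keeps the expected interval the same while preventing any periodic allocation pattern from systematically lining up with (or hiding from) the sampler.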
@@ -518,21 +536,41 @@ prof_alloc_prep(size_t size)
 	void *vec[prof_bt_max];
 	prof_bt_t bt;
 
-	/*
-	 * Determine whether to capture a backtrace based on whether size is
-	 * enough for prof_accum to reach prof_sample_threshold.  However,
-	 * delay updating these variables until prof_{m,re}alloc(), because we
-	 * don't know for sure that the allocation will succeed.
-	 *
-	 * Use subtraction rather than addition to avoid potential integer
-	 * overflow.
-	 */
-	if (size >= prof_sample_threshold - prof_sample_accum) {
+	if (opt_lg_prof_sample == 0) {
+		/*
+		 * Don't bother with sampling logic, since sampling interval is
+		 * 1.
+		 */
 		bt_init(&bt, vec);
 		prof_backtrace(&bt, 2, prof_bt_max);
 		ret = prof_lookup(&bt);
-	} else
-		ret = (prof_thr_cnt_t *)(uintptr_t)1U;
+	} else {
+		if (prof_sample_threshold == 0) {
+			/*
+			 * Initialize.  Seed the prng differently for each
+			 * thread.
+			 */
+			prof_sample_prn_state = (uint64_t)(uintptr_t)&size;
+			prof_sample_threshold_update();
+		}
+
+		/*
+		 * Determine whether to capture a backtrace based on whether
+		 * size is enough for prof_accum to reach
+		 * prof_sample_threshold.  However, delay updating these
+		 * variables until prof_{m,re}alloc(), because we don't know
+		 * for sure that the allocation will succeed.
+		 *
+		 * Use subtraction rather than addition to avoid potential
+		 * integer overflow.
+		 */
+		if (size >= prof_sample_threshold - prof_sample_accum) {
+			bt_init(&bt, vec);
+			prof_backtrace(&bt, 2, prof_bt_max);
+			ret = prof_lookup(&bt);
+		} else
+			ret = (prof_thr_cnt_t *)(uintptr_t)1U;
+	}
 
 	return (ret);
 }
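A note on the new initialization block: seeding with (uint64_t)(uintptr_t)&size works because size is a parameter living on the calling thread's stack, and each thread's stack occupies a distinct address range, so every thread gets a different prng stream with no locking and no global seed counter. A sketch of the pattern, assuming C11 _Thread_local for the state from the sketch above and reusing the illustrative geometric_sample() helper (jemalloc's actual thread-local machinery differs):

#include <stddef.h>
#include <stdint.h>

static _Thread_local uint64_t prn_state;	/* thread-local prng state */
static _Thread_local uint64_t threshold;	/* 0 doubles as "not yet seeded" */

static void
sample_init(size_t size)
{
	if (threshold == 0) {
		/*
		 * Seed from a stack address: distinct threads see distinct
		 * stack addresses, so each gets its own prng stream for
		 * free.
		 */
		prn_state = (uint64_t)(uintptr_t)&size;
		threshold = geometric_sample(19);	/* e.g. mean 2^19 bytes */
	}
}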
@@ -574,24 +612,6 @@ prof_cnt_set(const void *ptr, prof_thr_cnt_t *cnt)
 		huge_prof_cnt_set(ptr, cnt);
 }
 
-static inline void
-prof_sample_threshold_update(void)
-{
-	uint64_t r;
-	double u;
-
-	/*
-	 * Compute prof_sample_threshold as a geometrically distributed random
-	 * variable with mean (2^opt_lg_prof_sample).
-	 */
-	prn64(r, 53, prof_sample_prn_state, (uint64_t)1125899906842625LLU,
-	    1058392653243283975);
-	u = (double)r * (1.0/9007199254740992.0L);
-	prof_sample_threshold = (uint64_t)(log(u) /
-	    log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
-	    + (uint64_t)1U;
-}
-
 static inline void
 prof_sample_accum_update(size_t size)
 {
@@ -604,18 +624,10 @@ prof_sample_accum_update(size_t size)
 		return;
 	}
 
-	if (prof_sample_threshold == 0) {
-		/* Initialize.  Seed the prng differently for each thread. */
-		prof_sample_prn_state = (uint64_t)(uintptr_t)&size;
-		prof_sample_threshold_update();
-	}
-
	/* Take care to avoid integer overflow. */
 	if (size >= prof_sample_threshold - prof_sample_accum) {
 		prof_sample_accum -= (prof_sample_threshold - size);
-		/*
-		 * Compute new geometrically distributed prof_sample_threshold.
-		 */
+		/* Compute new prof_sample_threshold. */
 		prof_sample_threshold_update();
 		while (prof_sample_accum >= prof_sample_threshold) {
 			prof_sample_accum -= prof_sample_threshold;
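For context on this last hunk: prof_sample_accum counts bytes allocated since the last sample, and size >= prof_sample_threshold - prof_sample_accum is the overflow-safe spelling of prof_sample_accum + size >= prof_sample_threshold. After this commit, prof_alloc_prep() initializes the threshold before this function ever runs, which is why the lazy-init block could be deleted here. A condensed sketch of the accumulator logic using the illustrative names from the sketches above; the else branch and the loop tail, cut off in the hunk as shown, are my assumption about how the function continues:

static _Thread_local uint64_t accum;	/* bytes since the last sample */

static void
sample_accum_update(size_t size)
{
	/* Overflow-safe form of (accum + size >= threshold). */
	if (size >= threshold - accum) {
		/* Carry the bytes beyond the trigger into the next interval. */
		accum -= (threshold - size);
		threshold = geometric_sample(19);	/* e.g. mean 2^19 bytes */
		/* A single huge allocation may span several intervals. */
		while (accum >= threshold) {
			accum -= threshold;
			threshold = geometric_sample(19);
		}
	} else
		accum += size;
}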