Move unbias data to prof_data

Yinan Zhang
2020-08-21 10:23:23 -07:00
parent 5e90fd006e
commit 8efcdc3f98
4 changed files with 39 additions and 39 deletions

src/prof.c

@@ -61,8 +61,6 @@ static malloc_mutex_t prof_gdump_mtx;
uint64_t prof_interval = 0;
size_t lg_prof_sample;
size_t prof_unbiased_sz[SC_NSIZES];
size_t prof_shifted_unbiased_cnt[SC_NSIZES];
static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;
@@ -72,40 +70,6 @@ bool prof_booted = false;
/******************************************************************************/
void prof_unbias_map_init() {
	/* See the comment in prof_sample_new_event_wait */
#ifdef JEMALLOC_PROF
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		double sz = (double)sz_index2size(i);
		double rate = (double)(ZU(1) << lg_prof_sample);
		double div_val = 1.0 - exp(-sz / rate);
		double unbiased_sz = sz / div_val;
		/*
		 * The "true" right value for the unbiased count is
		 * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
		 * as integers (for a variety of reasons -- rounding errors
		 * could trigger asserts, and not all libcs can properly handle
		 * floating point arithmetic during malloc calls inside libc).
		 * Rounding to an integer, though, can lead to rounding errors
		 * of over 30% for sizes close to the sampling rate. So
		 * instead, we multiply by a constant, dividing the maximum
		 * possible roundoff error by that constant. To avoid overflow
		 * in summing up size_t values, the largest safe constant we can
		 * pick is the size of the smallest allocation.
		 */
		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
		double shifted_unbiased_cnt = cnt_shift / div_val;
		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
		prof_shifted_unbiased_cnt[i] = (size_t)round(
		    shifted_unbiased_cnt);
	}
#else
	unreachable();
#endif
}
/******************************************************************************/
void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
	cassert(config_prof);

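Where the exp() term comes from: the code above treats an allocation of sz bytes as being sampled with probability 1 - exp(-sz/rate) (see the comment in prof_sample_new_event_wait), so each recorded sample stands in for about 1/(1 - exp(-sz/rate)) allocations and sz/(1 - exp(-sz/rate)) bytes -- exactly the per-size-class values the loop precomputes. A minimal standalone sketch of that weighting (illustrative only, not part of this diff; the sizes and the 512 KiB rate, i.e. lg_prof_sample == 19, are example values):

#include <math.h>
#include <stdio.h>

/*
 * Illustrative only: probability that one allocation of `sz` bytes is
 * sampled, and the count/bytes that sample then stands in for.
 */
static void
sample_weight(double sz, double rate) {
	double p = 1.0 - exp(-sz / rate);	/* chance this allocation is sampled */
	printf("sz=%10.0f  p=%.6f  cnt=%9.3f  bytes=%13.1f\n",
	    sz, p, 1.0 / p, sz / p);
}

int
main(void) {
	double rate = 512.0 * 1024;		/* 2^19 bytes (lg_prof_sample == 19) */
	sample_weight(16.0, rate);		/* tiny: rarely sampled, large weight */
	sample_weight(512.0 * 1024, rate);	/* near the rate: weight ~1.58 */
	sample_weight(8.0 * 1024 * 1024, rate);	/* huge: almost always sampled */
	return 0;
}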
src/prof_data.c

@@ -59,6 +59,9 @@ static ckh_t bt2gctx;
*/
static prof_tdata_tree_t tdatas;
size_t prof_unbiased_sz[SC_NSIZES];
size_t prof_shifted_unbiased_cnt[SC_NSIZES];
/******************************************************************************/
/* Red-black trees. */
@@ -535,6 +538,38 @@ prof_double_uint64_cast(double d) {
}
#endif
void prof_unbias_map_init() {
	/* See the comment in prof_sample_new_event_wait */
#ifdef JEMALLOC_PROF
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		double sz = (double)sz_index2size(i);
		double rate = (double)(ZU(1) << lg_prof_sample);
		double div_val = 1.0 - exp(-sz / rate);
		double unbiased_sz = sz / div_val;
		/*
		 * The "true" right value for the unbiased count is
		 * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
		 * as integers (for a variety of reasons -- rounding errors
		 * could trigger asserts, and not all libcs can properly handle
		 * floating point arithmetic during malloc calls inside libc).
		 * Rounding to an integer, though, can lead to rounding errors
		 * of over 30% for sizes close to the sampling rate. So
		 * instead, we multiply by a constant, dividing the maximum
		 * possible roundoff error by that constant. To avoid overflow
		 * in summing up size_t values, the largest safe constant we can
		 * pick is the size of the smallest allocation.
		 */
		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
		double shifted_unbiased_cnt = cnt_shift / div_val;
		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
		prof_shifted_unbiased_cnt[i] = (size_t)round(
		    shifted_unbiased_cnt);
	}
#else
	unreachable();
#endif
}
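To make the roundoff concern in the comment above concrete (a standalone sketch, not part of this change; it assumes SC_LG_TINY_MIN == 3, i.e. an 8-byte smallest size class): with sz equal to the sampling rate, the true unbiased count is 1/(1 - exp(-1)) ≈ 1.582. Rounding that straight to an integer gives 2, about 26% too high, while storing round(8/(1 - exp(-1))) == 13 and treating it as a count scaled by 8 keeps the effective value at 13/8 = 1.625, under 3% off:

#include <math.h>
#include <stdio.h>

int
main(void) {
	/* Illustrative only; assumes an 8-byte smallest size class. */
	double cnt_shift = 8.0;			/* (double)(ZU(1) << SC_LG_TINY_MIN) */
	double div_val = 1.0 - exp(-1.0);	/* sz equal to the sampling rate */
	double true_cnt = 1.0 / div_val;	/* ~1.582 */
	double naive_cnt = round(true_cnt);	/* 2.0: roughly 26% too high */
	double shifted_cnt = round(cnt_shift / div_val) / cnt_shift; /* 13/8 = 1.625 */
	printf("true=%.3f naive=%.3f shifted=%.3f\n", true_cnt, naive_cnt,
	    shifted_cnt);
	return 0;
}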
/*
 * The unbiasing story is long. The jeprof unbiasing logic was copied from
 * pprof. Both shared an issue: they unbiased using the average size of the