Move unbias data to prof_data

Yinan Zhang 2020-08-21 10:23:23 -07:00
parent 5e90fd006e
commit 8efcdc3f98
4 changed files with 39 additions and 39 deletions

include/jemalloc/internal/prof_data.h

@@ -10,6 +10,9 @@ extern malloc_mutex_t prof_dump_mtx;
extern malloc_mutex_t *gctx_locks;
extern malloc_mutex_t *tdata_locks;
+extern size_t prof_unbiased_sz[SC_NSIZES];
+extern size_t prof_shifted_unbiased_cnt[SC_NSIZES];
void prof_bt_hash(const void *key, size_t r_hash[2]);
bool prof_bt_keycomp(const void *k1, const void *k2);
@@ -17,6 +20,7 @@ bool prof_data_init(tsd_t *tsd);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
+void prof_unbias_map_init();
void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
prof_tdata_t *tdata, bool leakcheck);
prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,

include/jemalloc/internal/prof_externs.h

@@ -41,9 +41,6 @@ extern uint64_t prof_interval;
* resets.
*/
extern size_t lg_prof_sample;
-extern size_t prof_unbiased_sz[SC_NSIZES];
-extern size_t prof_shifted_unbiased_cnt[SC_NSIZES];
-void prof_unbias_map_init();
extern bool prof_booted;

src/prof.c

@@ -61,8 +61,6 @@ static malloc_mutex_t prof_gdump_mtx;
uint64_t prof_interval = 0;
size_t lg_prof_sample;
-size_t prof_unbiased_sz[SC_NSIZES];
-size_t prof_shifted_unbiased_cnt[SC_NSIZES];
static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;
@@ -72,40 +70,6 @@ bool prof_booted = false;
/******************************************************************************/
-void prof_unbias_map_init() {
-	/* See the comment in prof_sample_new_event_wait */
-#ifdef JEMALLOC_PROF
-	for (szind_t i = 0; i < SC_NSIZES; i++) {
-		double sz = (double)sz_index2size(i);
-		double rate = (double)(ZU(1) << lg_prof_sample);
-		double div_val = 1.0 - exp(-sz / rate);
-		double unbiased_sz = sz / div_val;
-		/*
-		 * The "true" right value for the unbiased count is
-		 * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
-		 * as integers (for a variety of reasons -- rounding errors
-		 * could trigger asserts, and not all libcs can properly handle
-		 * floating point arithmetic during malloc calls inside libc).
-		 * Rounding to an integer, though, can lead to rounding errors
-		 * of over 30% for sizes close to the sampling rate. So
-		 * instead, we multiply by a constant, dividing the maximum
-		 * possible roundoff error by that constant. To avoid overflow
-		 * in summing up size_t values, the largest safe constant we can
-		 * pick is the size of the smallest allocation.
-		 */
-		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
-		double shifted_unbiased_cnt = cnt_shift / div_val;
-		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
-		prof_shifted_unbiased_cnt[i] = (size_t)round(
-		    shifted_unbiased_cnt);
-	}
-#else
-	unreachable();
-#endif
-}
/******************************************************************************/
void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
cassert(config_prof);

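The comment inside prof_unbias_map_init is easiest to see with numbers. Under the sampling model it references, an allocation of size sz is sampled with probability 1 - exp(-sz/rate), so each sample of a size class stands for 1/(1 - exp(-sz/rate)) real allocations on average. Below is a minimal standalone sketch (not part of this commit) of the rounding argument; lg_prof_sample = 19 and SC_LG_TINY_MIN = 3 are assumed defaults used only for illustration:

#include <math.h>
#include <stdio.h>

int main(void) {
	double rate = (double)(1UL << 19);     /* assumed default lg_prof_sample = 19 */
	double cnt_shift = (double)(1UL << 3); /* assumed SC_LG_TINY_MIN = 3 */
	double sz = 1.2 * rate;                /* a size close to the sampling rate */

	/* True expected number of allocations represented by one sample. */
	double true_cnt = 1.0 / (1.0 - exp(-sz / rate));
	/* Rounding the count directly loses up to ~30% near sz == rate. */
	double naive_cnt = round(true_cnt);
	/* Scaling by cnt_shift before rounding shrinks the roundoff 8x. */
	double shifted_cnt = round(cnt_shift * true_cnt) / cnt_shift;

	printf("true    %.4f\n", true_cnt);           /* prints 1.4310 */
	printf("naive   %.4f (err %+.1f%%)\n",
	    naive_cnt, 100.0 * (naive_cnt / true_cnt - 1.0));   /* -30.1% */
	printf("shifted %.4f (err %+.1f%%)\n",
	    shifted_cnt, 100.0 * (shifted_cnt / true_cnt - 1.0)); /* -3.9% */
	return 0;
}

Near sz == rate the unshifted rounding error approaches the 30% the comment warns about, while rounding in shifted units keeps the worst-case roundoff at 0.5/cnt_shift of a count.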
src/prof_data.c

@@ -59,6 +59,9 @@ static ckh_t bt2gctx;
*/
static prof_tdata_tree_t tdatas;
+size_t prof_unbiased_sz[SC_NSIZES];
+size_t prof_shifted_unbiased_cnt[SC_NSIZES];
/******************************************************************************/
/* Red-black trees. */
@@ -535,6 +538,38 @@ prof_double_uint64_cast(double d) {
}
#endif
+void prof_unbias_map_init() {
+	/* See the comment in prof_sample_new_event_wait */
+#ifdef JEMALLOC_PROF
+	for (szind_t i = 0; i < SC_NSIZES; i++) {
+		double sz = (double)sz_index2size(i);
+		double rate = (double)(ZU(1) << lg_prof_sample);
+		double div_val = 1.0 - exp(-sz / rate);
+		double unbiased_sz = sz / div_val;
+		/*
+		 * The "true" right value for the unbiased count is
+		 * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
+		 * as integers (for a variety of reasons -- rounding errors
+		 * could trigger asserts, and not all libcs can properly handle
+		 * floating point arithmetic during malloc calls inside libc).
+		 * Rounding to an integer, though, can lead to rounding errors
+		 * of over 30% for sizes close to the sampling rate. So
+		 * instead, we multiply by a constant, dividing the maximum
+		 * possible roundoff error by that constant. To avoid overflow
+		 * in summing up size_t values, the largest safe constant we can
+		 * pick is the size of the smallest allocation.
+		 */
+		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
+		double shifted_unbiased_cnt = cnt_shift / div_val;
+		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
+		prof_shifted_unbiased_cnt[i] = (size_t)round(
+		    shifted_unbiased_cnt);
+	}
+#else
+	unreachable();
+#endif
+}
/*
* The unbiasing story is long. The jeprof unbiasing logic was copied from
* pprof. Both shared an issue: they unbiased using the average size of the