Fix "prof.reset" mallctl-related corruption.

Fix heap profiling to distinguish among otherwise identical sample sites
with interposed resets (triggered via the "prof.reset" mallctl).  This
bug could cause data structure corruption that would most likely result
in a segfault.
Author: Jason Evans
Date:   2015-09-09 23:16:10 -07:00
Commit: a00b10735a (parent: a306a60651)

4 changed files with 84 additions and 20 deletions

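For context on the failure mode: each tctx is keyed within its gctx tree by a tuple that must totally order all live tctx's. tctx_uid is drawn from a per-tdata counter, and "prof.reset" replaces a thread's tdata, restarting that counter, so a tctx created before a reset and one created after it could carry the same (thr_uid, tctx_uid) pair and compare equal, corrupting the tree. A minimal standalone sketch of the collision (hypothetical names, not the actual jemalloc internals):

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t thr_uid;      /* stable per-thread id */
	uint64_t thr_discrim;  /* bumped each time the thread's tdata is recreated */
	uint64_t tctx_uid_next;
} tdata_sketch_t;

typedef struct {
	uint64_t thr_uid, thr_discrim, tctx_uid;
} key_sketch_t;

static key_sketch_t
new_key(tdata_sketch_t *td)
{
	key_sketch_t k = {td->thr_uid, td->thr_discrim, td->tctx_uid_next++};
	return (k);
}

int
main(void)
{
	tdata_sketch_t td = {42, 0, 0};
	key_sketch_t before = new_key(&td);

	/* "prof.reset": same thread, fresh tdata, counter restarts at 0. */
	tdata_sketch_t td2 = {42, 1, 0};
	key_sketch_t after = new_key(&td2);

	/* Without thr_discrim, two distinct keys collide. */
	printf("collide without discrim: %d\n",
	    before.thr_uid == after.thr_uid &&
	    before.tctx_uid == after.tctx_uid);
	printf("distinct with discrim:   %d\n",
	    before.thr_discrim != after.thr_discrim);
	return (0);
}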
diff --git a/ChangeLog b/ChangeLog

@@ -22,6 +22,10 @@ brevity. Much more detail can be found in the git revision history:
   visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
 - Fix TLS configuration such that it is enabled by default for platforms on
   which it works correctly.
+- Fix heap profiling to distinguish among otherwise identical sample sites
+  with interposed resets (triggered via the "prof.reset" mallctl).  This bug
+  could cause data structure corruption that would most likely result in a
+  segfault.
 
 * 4.0.0 (August 17, 2015)

diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h

@@ -90,10 +90,11 @@ struct prof_tctx_s {
 	prof_tdata_t		*tdata;
 
 	/*
-	 * Copy of tdata->thr_uid, necessary because tdata may be defunct during
-	 * teardown.
+	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+	 * defunct during teardown.
 	 */
 	uint64_t		thr_uid;
+	uint64_t		thr_discrim;
 
 	/* Profiling counters, protected by tdata->lock. */
 	prof_cnt_t		cnts;

diff --git a/src/prof.c b/src/prof.c

@@ -138,10 +138,17 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
 	uint64_t a_thr_uid = a->thr_uid;
 	uint64_t b_thr_uid = b->thr_uid;
 	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
 
 	if (ret == 0) {
-		uint64_t a_tctx_uid = a->tctx_uid;
-		uint64_t b_tctx_uid = b->tctx_uid;
-		ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
+		uint64_t a_thr_discrim = a->thr_discrim;
+		uint64_t b_thr_discrim = b->thr_discrim;
+		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+		    b_thr_discrim);
+		if (ret == 0) {
+			uint64_t a_tctx_uid = a->tctx_uid;
+			uint64_t b_tctx_uid = b->tctx_uid;
+			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+			    b_tctx_uid);
+		}
 	}
 	return (ret);
 }
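A note on the comparator's shape: (a > b) - (a < b) is the branch-free three-way comparison idiom, yielding -1, 0, or 1 without the overflow risk that subtracting wide unsigned values would carry, and the nested if (ret == 0) blocks chain it lexicographically over the key triple (thr_uid, thr_discrim, tctx_uid). A standalone sketch of the same idiom (hypothetical helper, not jemalloc code):

#include <assert.h>
#include <stdint.h>

static int
cmp_u64(uint64_t a, uint64_t b)
{
	/* Returns -1, 0, or 1 without branches or subtraction overflow. */
	return ((a > b) - (a < b));
}

int
main(void)
{
	uint64_t a[3] = {7, 1, 0};	/* (thr_uid, thr_discrim, tctx_uid) */
	uint64_t b[3] = {7, 0, 0};
	int ret = 0;
	int i;

	/* Lexicographic chaining: fall through to the next key on ties. */
	for (i = 0; i < 3 && ret == 0; i++)
		ret = cmp_u64(a[i], b[i]);
	/* a was created after a reset (discrim 1 > 0), so it sorts later. */
	assert(ret == 1);
	return (0);
}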
@@ -791,6 +798,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 		}
 		ret.p->tdata = tdata;
 		ret.p->thr_uid = tdata->thr_uid;
+		ret.p->thr_discrim = tdata->thr_discrim;
 		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
 		ret.p->gctx = gctx;
 		ret.p->tctx_uid = tdata->tctx_uid_next++;

diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c

@@ -16,6 +16,14 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
 	return (fd);
 }
 
+static void
+set_prof_active(bool active)
+{
+
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure");
+}
+
 static size_t
 get_lg_prof_sample(void)
 {
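For readers unfamiliar with the knob the new helper wraps: "prof.active" is a writable boolean mallctl that toggles profile sampling at runtime. A minimal application-side sketch (assumes jemalloc built with --enable-prof and run with MALLOC_CONF=prof:true):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool active = true;

	/* Enable sampling; fails if profiling was not configured in. */
	if (mallctl("prof.active", NULL, NULL, &active, sizeof(active)) != 0)
		fprintf(stderr, "prof.active not available\n");

	/* ... allocations here are candidates for sampling ... */

	active = false;
	mallctl("prof.active", NULL, NULL, &active, sizeof(active));
	return (0);
}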
@@ -97,15 +105,12 @@ prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
 TEST_BEGIN(test_prof_reset_cleanup)
 {
-	bool active;
 	void *p;
 	prof_dump_header_t *prof_dump_header_orig;
 
 	test_skip_if(!config_prof);
 
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	set_prof_active(true);
 
 	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
 	p = mallocx(1, 0);
@@ -133,9 +138,7 @@ TEST_BEGIN(test_prof_reset_cleanup)
 	dallocx(p, 0);
 	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
 
-	active = false;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while deactivating profiling");
+	set_prof_active(false);
 }
 TEST_END
@@ -192,7 +195,6 @@ thd_start(void *varg)
 TEST_BEGIN(test_prof_reset)
 {
 	size_t lg_prof_sample_orig;
-	bool active;
 	thd_t thds[NTHREADS];
 	unsigned thd_args[NTHREADS];
 	unsigned i;
@@ -208,9 +210,7 @@ TEST_BEGIN(test_prof_reset)
 	lg_prof_sample_orig = get_lg_prof_sample();
 	do_prof_reset(5);
 
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	set_prof_active(true);
 
 	for (i = 0; i < NTHREADS; i++) {
 		thd_args[i] = i;
@@ -224,9 +224,7 @@ TEST_BEGIN(test_prof_reset)
 	assert_zu_eq(prof_tdata_count(), tdata_count,
 	    "Unexpected remaining tdata structures");
 
-	active = false;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while deactivating profiling");
+	set_prof_active(false);
 
 	do_prof_reset(lg_prof_sample_orig);
 }
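The do_prof_reset() helper used throughout presumably writes its argument to the "prof.reset" mallctl, which discards accumulated profile data and optionally installs a new lg_prof_sample (sampling roughly once per 2^lg_sample bytes of allocation activity; do_prof_reset(0) thus samples every allocation, which is what lets the new test below hit the same site deterministically across resets). A hedged sketch of such a wrapper:

#include <jemalloc/jemalloc.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	/* Reset profile counters and set lg_prof_sample to 0 (sample all). */
	size_t lg_sample = 0;

	if (mallctl("prof.reset", NULL, NULL, &lg_sample,
	    sizeof(lg_sample)) != 0)
		fprintf(stderr, "prof.reset not available\n");
	return (0);
}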
@@ -237,6 +235,58 @@ TEST_END
 #undef RESET_INTERVAL
 #undef DUMP_INTERVAL
 
+/* Test sampling at the same allocation site across resets. */
+#define	NITER 10
+TEST_BEGIN(test_xallocx)
+{
+	size_t lg_prof_sample_orig;
+	unsigned i;
+	void *ptrs[NITER];
+
+	test_skip_if(!config_prof);
+
+	lg_prof_sample_orig = get_lg_prof_sample();
+	set_prof_active(true);
+
+	/* Reset profiling. */
+	do_prof_reset(0);
+
+	for (i = 0; i < NITER; i++) {
+		void *p;
+		size_t sz, nsz;
+
+		/* Reset profiling. */
+		do_prof_reset(0);
+
+		/* Allocate small object (which will be promoted). */
+		p = ptrs[i] = mallocx(1, 0);
+		assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+		/* Reset profiling. */
+		do_prof_reset(0);
+
+		/* Perform successful xallocx(). */
+		sz = sallocx(p, 0);
+		assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+		    "Unexpected xallocx() failure");
+
+		/* Perform unsuccessful xallocx(). */
+		nsz = nallocx(sz+1, 0);
+		assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+		    "Unexpected xallocx() success");
+	}
+
+	for (i = 0; i < NITER; i++) {
+		/* dallocx. */
+		dallocx(ptrs[i], 0);
+	}
+
+	set_prof_active(false);
+	do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
 int
 main(void)
 {
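The xallocx() calls in the new test exercise its contract: xallocx(ptr, size, extra, flags) resizes strictly in place (it never relocates ptr) and returns the allocation's resulting real size, so failure is reported by returning the old size rather than by relocating. A standalone sketch mirroring the test's expectations (assumes a stock jemalloc build):

#include <jemalloc/jemalloc.h>
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	void *p = mallocx(1, 0);
	size_t sz, nsz;

	assert(p != NULL);
	sz = sallocx(p, 0);		/* current usable size */
	nsz = nallocx(sz + 1, 0);	/* usable size of the next class */

	/* Growing within the current size class succeeds in place. */
	assert(xallocx(p, sz, 0, 0) == sz);
	/* A small allocation cannot grow to the next size class in place,
	 * so xallocx() leaves p untouched and returns the old size. */
	assert(xallocx(p, nsz, 0, 0) == sz);

	dallocx(p, 0);
	return (0);
}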
@@ -247,5 +297,6 @@ main(void)
 	return (test(
 	    test_prof_reset_basic,
 	    test_prof_reset_cleanup,
-	    test_prof_reset));
+	    test_prof_reset,
+	    test_xallocx));
 }