diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index e5c229fe..b586e690 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1547,7 +1548,8 @@ malloc_conf = "xmalloc:true";]]>
         Reset all memory profile statistics, and optionally update the sample
         rate (see <link
-        linkend="opt.lg_prof_sample">opt.lg_prof_sample</link>).
+        linkend="opt.lg_prof_sample">opt.lg_prof_sample</link>
+        and <link linkend="prof.lg_sample">prof.lg_sample</link>).
@@ -1558,7 +1559,7 @@ malloc_conf = "xmalloc:true";]]>
         r- []
-        Get the sample rate (see <link
+        Get the current sample rate (see <link
         linkend="opt.lg_prof_sample">opt.lg_prof_sample</link>).
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 5ac82f59..33f8ce01 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -292,6 +292,7 @@ prof_boot0
 prof_boot1
 prof_boot2
 prof_bt_count
+prof_dump_header
 prof_dump_open
 prof_free
 prof_free_sampled_object
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 3872c7ae..91c871de 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -151,22 +151,23 @@ struct prof_gctx_s {
 };
 typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
 
-typedef enum {
-	prof_tdata_state_attached,	/* Active thread attached, data valid. */
-	prof_tdata_state_detached,	/* Defunct thread, data remain valid. */
-	prof_tdata_state_expired	/* Predates reset, omit data from dump. */
-} prof_tdata_state_t;
-
 struct prof_tdata_s {
 	malloc_mutex_t		*lock;
 
 	/* Monotonically increasing unique thread identifier. */
 	uint64_t		thr_uid;
 
+	/*
+	 * Monotonically increasing discriminator among tdata structures
+	 * associated with the same thr_uid.
+	 */
+	uint64_t		thr_discrim;
+
 	/* Included in heap profile dumps if non-NULL. */
 	char			*thread_name;
 
-	prof_tdata_state_t	state;
+	bool			attached;
+	bool			expired;
 
 	rb_node(prof_tdata_t)	tdata_link;
 
@@ -257,9 +258,13 @@ void	bt_init(prof_bt_t *bt, void **vec);
 void	prof_backtrace(prof_bt_t *bt);
 prof_tctx_t	*prof_lookup(tsd_t *tsd, prof_bt_t *bt);
 #ifdef JEMALLOC_JET
+size_t	prof_tdata_count(void);
 size_t	prof_bt_count(void);
+const prof_cnt_t *prof_cnt_all(void);
 typedef int (prof_dump_open_t)(bool, const char *);
 extern prof_dump_open_t *prof_dump_open;
+typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
+extern prof_dump_header_t *prof_dump_header;
 #endif
 void	prof_idump(void);
 bool	prof_mdump(const char *filename);
@@ -312,12 +317,11 @@ prof_tdata_get(tsd_t *tsd, bool create)
 		if (unlikely(tdata == NULL)) {
 			tdata = prof_tdata_init(tsd);
 			tsd_prof_tdata_set(tsd, tdata);
-		} else if (unlikely(tdata->state == prof_tdata_state_expired)) {
+		} else if (unlikely(tdata->expired)) {
 			tdata = prof_tdata_reinit(tsd, tdata);
 			tsd_prof_tdata_set(tsd, tdata);
 		}
-		assert(tdata == NULL ||
-		    tdata->state == prof_tdata_state_attached);
+		assert(tdata == NULL || tdata->attached);
 	}
 
 	return (tdata);
diff --git a/src/prof.c b/src/prof.c
index 9f10b533..0a96d85f 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -137,10 +137,18 @@ rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
 JEMALLOC_INLINE_C int
 prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
 {
+	int ret;
 	uint64_t a_uid = a->thr_uid;
 	uint64_t b_uid = b->thr_uid;
 
-	return ((a_uid > b_uid) - (a_uid < b_uid));
+	ret = ((a_uid > b_uid) - (a_uid < b_uid));
+	if (ret == 0) {
+		uint64_t a_discrim = a->thr_discrim;
+		uint64_t b_discrim = b->thr_discrim;
+
+		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+	}
+	return (ret);
 }
 
 rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t,
     tdata_link, prof_tdata_comp)
@@ -504,7 +512,7 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
 	gctx->lock = prof_gctx_mutex_choose();
 	/*
 	 * Set nlimbo to 1, in order to avoid a race condition with
-	 * prof_tctx_destroy()/prof_gctx_maybe_destroy().
+	 * prof_tctx_destroy()/prof_gctx_try_destroy().
 	 */
 	gctx->nlimbo = 1;
 	tctx_tree_new(&gctx->tctxs);
@@ -516,7 +524,7 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
 }
 
 static void
-prof_gctx_maybe_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
+prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
 {
 
 	cassert(config_prof);
@@ -530,6 +538,7 @@ prof_gctx_maybe_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
 	 */
 	prof_enter(tdata);
 	malloc_mutex_lock(gctx->lock);
+	assert(gctx->nlimbo != 0);
 	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
 		/* Remove gctx from bt2gctx. */
 		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
@@ -605,10 +614,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 		 *
 		 * 1) Sample an allocation associated with gctx.
 		 * 2) Deallocate the sampled object.
-		 * 3) Successfully prof_gctx_maybe_destroy(gctx).
+		 * 3) Successfully prof_gctx_try_destroy(gctx).
 		 *
 		 * The result would be that gctx no longer exists by the time
-		 * this thread accesses it in prof_gctx_maybe_destroy().
+		 * this thread accesses it in prof_gctx_try_destroy().
 		 */
 		gctx->nlimbo++;
 		destroy_gctx = true;
@@ -616,7 +625,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 		destroy_gctx = false;
 	malloc_mutex_unlock(gctx->lock);
 	if (destroy_gctx)
-		prof_gctx_maybe_destroy(tsd, gctx, tdata);
+		prof_gctx_try_destroy(tsd, gctx, tdata);
 
 	if (destroy_tdata)
 		prof_tdata_destroy(tsd, tdata);
@@ -657,7 +666,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
 	} else {
 		/*
 		 * Increment nlimbo, in order to avoid a race condition with
-		 * prof_tctx_destroy()/prof_gctx_maybe_destroy().
+		 * prof_tctx_destroy()/prof_gctx_try_destroy().
 		 */
 		malloc_mutex_lock(gctx.p->lock);
 		gctx.p->nlimbo++;
@@ -710,7 +719,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 		ret.v = imalloc(tsd, sizeof(prof_tctx_t));
 		if (ret.p == NULL) {
 			if (new_gctx)
-				prof_gctx_maybe_destroy(tsd, gctx, tdata);
+				prof_gctx_try_destroy(tsd, gctx, tdata);
 			return (NULL);
 		}
 		ret.p->tdata = tdata;
@@ -723,7 +732,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 		malloc_mutex_unlock(tdata->lock);
 		if (error) {
 			if (new_gctx)
-				prof_gctx_maybe_destroy(tsd, gctx, tdata);
+				prof_gctx_try_destroy(tsd, gctx, tdata);
 			idalloc(tsd, ret.v);
 			return (NULL);
 		}
@@ -792,6 +801,31 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
 #endif
 }
 
+#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+	size_t *tdata_count = (size_t *)arg;
+
+	(*tdata_count)++;
+
+	return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+	size_t tdata_count = 0;
+
+	malloc_mutex_lock(&tdatas_mtx);
+	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+	    (void *)&tdata_count);
+	malloc_mutex_unlock(&tdatas_mtx);
+
+	return (tdata_count);
+}
+#endif
+
 #ifdef JEMALLOC_JET
 size_t
 prof_bt_count(void)
@@ -998,7 +1032,6 @@ static prof_tctx_t *
 prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 {
 	prof_tctx_t *ret;
-	tsd_t *tsd = (tsd_t *)arg;
 
 	switch (tctx->state) {
 	case prof_tctx_state_nominal:
@@ -1008,9 +1041,7 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 		tctx->state = prof_tctx_state_nominal;
 		break;
 	case prof_tctx_state_purgatory:
-		ret = tctx_tree_next(tctxs, tctx);
-		tctx_tree_remove(tctxs, tctx);
-		idalloc(tsd, tctx);
+		ret = tctx;
 		goto label_return;
 	default:
 		not_reached();
@@ -1056,27 +1087,47 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
 	return (NULL);
 }
 
-static prof_gctx_t *
-prof_gctx_finish_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
 {
-	tsd_t *tsd = (tsd_t *)arg;
 	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
-	prof_tctx_t *next;
-	bool destroy_gctx;
+	prof_gctx_t *gctx;
 
-	malloc_mutex_lock(gctx->lock);
-	next = NULL;
-	do {
-		next = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter,
-		    tsd);
-	} while (next != NULL);
-	gctx->nlimbo--;
-	destroy_gctx = prof_gctx_should_destroy(gctx);
-	malloc_mutex_unlock(gctx->lock);
-	if (destroy_gctx)
-		prof_gctx_maybe_destroy(tsd, gctx, tdata);
+	/*
+	 * Standard tree iteration won't work here, because as soon as we
+	 * decrement gctx->nlimbo and unlock gctx, another thread can
+	 * concurrently destroy it, which will corrupt the tree. Therefore,
+	 * tear down the tree one node at a time during iteration.
+	 */
+	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+		gctx_tree_remove(gctxs, gctx);
+		malloc_mutex_lock(gctx->lock);
+		{
+			prof_tctx_t *next;
 
-	return (NULL);
+			next = NULL;
+			do {
+				prof_tctx_t *to_destroy =
+				    tctx_tree_iter(&gctx->tctxs, next,
+				    prof_tctx_finish_iter, NULL);
+				if (to_destroy != NULL) {
+					next = tctx_tree_next(&gctx->tctxs,
+					    to_destroy);
+					tctx_tree_remove(&gctx->tctxs,
+					    to_destroy);
+					idalloc(tsd, to_destroy);
+				} else
+					next = NULL;
+			} while (next != NULL);
+		}
+		gctx->nlimbo--;
+		if (prof_gctx_should_destroy(gctx)) {
+			gctx->nlimbo++;
+			malloc_mutex_unlock(gctx->lock);
+			prof_gctx_try_destroy(tsd, gctx, tdata);
+		} else
+			malloc_mutex_unlock(gctx->lock);
+	}
 }
 
 static prof_tdata_t *
@@ -1085,7 +1136,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
 	prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
 
 	malloc_mutex_lock(tdata->lock);
-	if (tdata->state != prof_tdata_state_expired) {
+	if (!tdata->expired) {
 		size_t tabind;
 		union {
 			prof_tctx_t	*p;
@@ -1130,6 +1181,10 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
 	return (NULL);
 }
 
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
 static bool
 prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
 {
@@ -1148,6 +1203,11 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
 	malloc_mutex_unlock(&tdatas_mtx);
 	return (ret);
 }
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define	prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
 
 /* gctx->lock is held. */
 static bool
@@ -1277,7 +1337,7 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
 	malloc_mutex_lock(gctx->lock);
 
 	if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
-		ret = gctx_tree_next(gctxs, gctx);
+		ret = gctx;
 		goto label_return;
 	}
 
@@ -1302,7 +1362,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 
 	cassert(config_prof);
 
-	tdata = prof_tdata_get(tsd, false);
+	tdata = prof_tdata_get(tsd, true);
 	if (tdata == NULL)
 		return (true);
 
@@ -1352,7 +1412,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 	if (prof_dump_close(propagate_err))
 		goto label_open_close_error;
 
-	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
+	prof_gctx_finish(tsd, &gctxs);
 	malloc_mutex_unlock(&prof_dump_mtx);
 
 	if (leakcheck)
@@ -1362,7 +1422,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 label_write_error:
 	prof_dump_close(propagate_err);
 label_open_close_error:
-	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
+	prof_gctx_finish(tsd, &gctxs);
 	malloc_mutex_unlock(&prof_dump_mtx);
 	return (true);
 }
@@ -1533,7 +1593,7 @@ prof_thr_uid_alloc(void)
 }
 
 static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid)
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
 {
 	prof_tdata_t *tdata;
 
@@ -1546,8 +1606,10 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid)
 
 	tdata->lock = prof_tdata_mutex_choose(thr_uid);
 	tdata->thr_uid = thr_uid;
+	tdata->thr_discrim = thr_discrim;
 	tdata->thread_name = NULL;
-	tdata->state = prof_tdata_state_attached;
+	tdata->attached = true;
+	tdata->expired = false;
 
 	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
 	    prof_bt_hash, prof_bt_keycomp)) {
@@ -1576,14 +1638,7 @@ prof_tdata_t *
 prof_tdata_init(tsd_t *tsd)
 {
 
-	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc()));
-}
-
-prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
-{
-
-	return (prof_tdata_init_impl(tsd, tdata->thr_uid));
+	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0));
 }
 
 /* tdata->lock must be held. */
@@ -1591,22 +1646,21 @@ static bool
 prof_tdata_should_destroy(prof_tdata_t *tdata)
 {
 
-	if (tdata->state == prof_tdata_state_attached)
+	if (tdata->attached)
 		return (false);
 	if (ckh_count(&tdata->bt2tctx) != 0)
 		return (false);
 	return (true);
 }
 
+/* tdatas_mtx must be held. */
 static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata)
 {
 
 	assert(prof_tdata_should_destroy(tdata));
 
-	malloc_mutex_lock(&tdatas_mtx);
 	tdata_tree_remove(&tdatas, tdata);
-	malloc_mutex_unlock(&tdatas_mtx);
 
 	if (tdata->thread_name != NULL)
 		idalloc(tsd, tdata->thread_name);
@@ -1615,14 +1669,22 @@ prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
 }
 
 static void
-prof_tdata_state_transition(tsd_t *tsd, prof_tdata_t *tdata,
-    prof_tdata_state_t state)
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+	malloc_mutex_lock(&tdatas_mtx);
+	prof_tdata_destroy_locked(tsd, tdata);
+	malloc_mutex_unlock(&tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
 {
 	bool destroy_tdata;
 
 	malloc_mutex_lock(tdata->lock);
-	if (tdata->state != state) {
-		tdata->state = state;
+	if (tdata->attached) {
+		tdata->attached = false;
 		destroy_tdata = prof_tdata_should_destroy(tdata);
 	} else
 		destroy_tdata = false;
@@ -1631,32 +1693,44 @@ prof_tdata_state_transition(tsd_t *tsd, prof_tdata_t *tdata,
 		prof_tdata_destroy(tsd, tdata);
 }
 
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
 {
+	uint64_t thr_uid = tdata->thr_uid;
+	uint64_t thr_discrim = tdata->thr_discrim + 1;
 
-	prof_tdata_state_transition(tsd, tdata, prof_tdata_state_detached);
+	prof_tdata_detach(tsd, tdata);
+	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim));
 }
 
-static void
-prof_tdata_expire(tsd_t *tsd, prof_tdata_t *tdata)
+static bool
+prof_tdata_expire(prof_tdata_t *tdata)
 {
+	bool destroy_tdata;
 
-	prof_tdata_state_transition(tsd, tdata, prof_tdata_state_expired);
+	malloc_mutex_lock(tdata->lock);
+	if (!tdata->expired) {
+		tdata->expired = true;
+		destroy_tdata = tdata->attached ? false :
+		    prof_tdata_should_destroy(tdata);
+	} else
+		destroy_tdata = false;
+	malloc_mutex_unlock(tdata->lock);
+
+	return (destroy_tdata);
 }
 
 static prof_tdata_t *
 prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
 {
 
-	tsd_t *tsd = (tsd_t *)arg;
-	prof_tdata_expire(tsd, tdata);
-	return (NULL);
+	return (prof_tdata_expire(tdata) ? tdata : NULL);
 }
 
 void
 prof_reset(tsd_t *tsd, size_t lg_sample)
 {
+	prof_tdata_t *next;
 
 	assert(lg_sample < (sizeof(uint64_t) << 3));
 
@@ -1664,7 +1738,18 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
 	malloc_mutex_lock(&tdatas_mtx);
 
 	lg_prof_sample = lg_sample;
-	tdata_tree_iter(&tdatas, NULL, prof_tdata_reset_iter, tsd);
+
+	next = NULL;
+	do {
+		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+		    prof_tdata_reset_iter, NULL);
+		if (to_destroy != NULL) {
+			next = tdata_tree_next(&tdatas, to_destroy);
+			tdata_tree_remove(&tdatas, to_destroy);
+			prof_tdata_destroy(tsd, to_destroy);
+		} else
+			next = NULL;
+	} while (next != NULL);
 
 	malloc_mutex_unlock(&tdatas_mtx);
 	malloc_mutex_unlock(&prof_dump_mtx);
diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c
new file mode 100644
index 00000000..73fda419
--- /dev/null
+++ b/test/unit/prof_reset.c
@@ -0,0 +1,238 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+    "prof:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+	int fd;
+
+	fd = open("/dev/null", O_WRONLY);
+	assert_d_ne(fd, -1, "Unexpected open() failure");
+
+	return (fd);
+}
+
+TEST_BEGIN(test_prof_reset_basic)
+{
+	size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+	size_t sz;
+	unsigned i;
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
+	    NULL, 0), 0,
+	    "Unexpected mallctl failure while reading profiling sample rate");
+	assert_zu_eq(lg_prof_sample_orig, 0,
+	    "Unexpected profiling sample rate");
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure while reading profiling sample rate");
+	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+	    "\"prof.lg_sample\"");
+
+	/* Test simple resets. */
+	for (i = 0; i < 2; i++) {
+		assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+		    "Unexpected mallctl failure while resetting profile data");
+		sz = sizeof(size_t);
+		assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
+		    NULL, 0), 0, "Unexpected mallctl failure while reading "
+		    "profiling sample rate");
+		assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+		    "Unexpected profile sample rate change");
+	}
+
+	/* Test resets with prof.lg_sample changes. */
+	lg_prof_sample_next = 1;
+	for (i = 0; i < 2; i++) {
+		assert_d_eq(mallctl("prof.reset", NULL, NULL,
+		    &lg_prof_sample_next, sizeof(size_t)), 0,
+		    "Unexpected mallctl failure while resetting profile data");
+		sz = sizeof(size_t);
+		assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
+		    NULL, 0), 0, "Unexpected mallctl failure while reading "
+		    "profiling sample rate");
+		assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
+		    "Expected profile sample rate change");
+		lg_prof_sample_next = lg_prof_sample_orig;
+	}
+
+	/* Make sure the test code restored prof.lg_sample. */
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure while reading profiling sample rate");
+	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+	    "\"prof.lg_sample\"");
+}
+TEST_END
+
+bool prof_dump_header_intercepted = false;
+prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
+static bool
+prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
+{
+
+	prof_dump_header_intercepted = true;
+	memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
+
+	return (false);
+}
+
+TEST_BEGIN(test_prof_reset_cleanup)
+{
+	bool active;
+	void *p;
+	prof_dump_header_t *prof_dump_header_orig;
+
+	active = true;
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure while activating profiling");
+
+	/*
+	 * Verify that reset actually drops the backtrace count to 0: allocate
+	 * an object, reset, check the count, then free.  prof_bt_count()
+	 * alone does not expose enough state for this, so also intercept
+	 * prof_dump_header(), which receives the merged counters needed for
+	 * these checks.
+	 */
+
+	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+	p = mallocx(1, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+	prof_dump_header_orig = prof_dump_header;
+	prof_dump_header = prof_dump_header_intercept;
+	assert_false(prof_dump_header_intercepted, "Unexpected intercept");
+
+	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+	    0, "Unexpected error while dumping heap profile");
+	assert_true(prof_dump_header_intercepted, "Expected intercept");
+	assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
+
+	assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+	    "Unexpected error while resetting heap profile data");
+	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+	    0, "Unexpected error while dumping heap profile");
+	assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
+	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+	prof_dump_header = prof_dump_header_orig;
+
+	dallocx(p, 0);
+	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+
+	active = false;
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure while deactivating profiling");
+}
+TEST_END
+
+#define	NTHREADS		4
+#define	NALLOCS_PER_THREAD	(1U << 13)
+#define	OBJ_RING_BUF_COUNT	1531
+#define	RESET_INTERVAL		(1U << 10)
+#define	DUMP_INTERVAL		3677
+static void *
+thd_start(void *varg)
+{
+	unsigned thd_ind = *(unsigned *)varg;
+	unsigned i;
+	void *objs[OBJ_RING_BUF_COUNT];
+
+	memset(objs, 0, sizeof(objs));
+
+	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+		if (i % RESET_INTERVAL == 0) {
+			assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+			    0, "Unexpected error while resetting heap profile "
+			    "data");
+		}
+
+		if (i % DUMP_INTERVAL == 0) {
+			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+			    0, "Unexpected error while dumping heap profile");
+		}
+
+		{
+			void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+			if (*pp != NULL) {
+				dallocx(*pp, 0);
+				*pp = NULL;
+			}
+			*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
+			assert_ptr_not_null(*pp,
+			    "Unexpected btalloc() failure");
+		}
+	}
+
+	/* Clean up any remaining objects. */
+	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+		void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+		if (*pp != NULL) {
+			dallocx(*pp, 0);
+			*pp = NULL;
+		}
+	}
+
+	return (NULL);
+}
+
+TEST_BEGIN(test_prof_reset)
+{
+	bool active;
+	thd_t thds[NTHREADS];
+	unsigned thd_args[NTHREADS];
+	unsigned i;
+	size_t bt_count, tdata_count;
+
+	test_skip_if(!config_prof);
+
+	bt_count = prof_bt_count();
+	assert_zu_eq(bt_count, 0,
+	    "Unexpected pre-existing backtraces");
+	tdata_count = prof_tdata_count();
+
+	active = true;
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure while activating profiling");
+
+	for (i = 0; i < NTHREADS; i++) {
+		thd_args[i] = i;
+		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+	}
+	for (i = 0; i < NTHREADS; i++)
+		thd_join(thds[i], NULL);
+
+	assert_zu_eq(prof_bt_count(), bt_count,
+	    "Unexpected backtrace count change");
+	assert_zu_eq(prof_tdata_count(), tdata_count,
+	    "Unexpected remaining tdata structures");
+
+	active = false;
+	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+	    0, "Unexpected mallctl failure while deactivating profiling");
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+int
+main(void)
+{
+
+	/* Intercept dumping prior to running any tests. */
+	prof_dump_open = prof_dump_open_intercept;
+
+	return (test(
+	    test_prof_reset_basic,
+	    test_prof_reset_cleanup,
+	    test_prof_reset));
+}
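
For reference, a minimal consumer-side sketch (not part of the patch) of how an application might drive the interface this change documents and tests: "prof.reset" optionally accepts a new lg_prof_sample value, and "prof.lg_sample" reports the rate currently in effect. The include path, the unprefixed mallctl() entry point, and the build/runtime settings mentioned in the comment are assumptions about the consumer's environment, not something this diff establishes.

/*
 * Hypothetical example; assumes jemalloc built with --enable-prof and the
 * process started with MALLOC_CONF="prof:true".  Error handling is reduced
 * to simple checks.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t lg_sample = 19;	/* Request an average sample interval of 2^19 bytes. */
	size_t cur, sz = sizeof(cur);

	/* Discard accumulated profile data and install the new sample rate. */
	if (mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample)) != 0)
		fprintf(stderr, "prof.reset failed\n");

	/* prof.lg_sample reflects the rate now in effect. */
	if (mallctl("prof.lg_sample", &cur, &sz, NULL, 0) == 0)
		printf("lg_prof_sample is now %zu\n", cur);

	return (0);
}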