Implement/test/fix prof-related mallctl's.
Implement/test/fix the opt.prof_thread_active_init, prof.thread_active_init, and thread.prof.active mallctl's. Test/fix the thread.prof.name mallctl. Refactor opt_prof_active to be read-only and move mutable state into the prof_active variable. Stop leaning on ctl-related locking for protection.
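As a usage sketch (not part of the commit), the new and newly mutable mallctls can be exercised from application code roughly as follows, assuming a build with profiling enabled (--enable-prof) and the unprefixed mallctl() entry point; error handling is mostly omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    bool on = true;
    bool old;
    size_t sz = sizeof(old);
    const char *name = "worker-0";

    /* prof.active: read the previous value and write a new one in one call. */
    if (mallctl("prof.active", &old, &sz, &on, sizeof(on)) != 0)
        fprintf(stderr, "profiling not enabled in this build\n");

    /* prof.thread_active_init: initial sampling state for new threads. */
    mallctl("prof.thread_active_init", NULL, NULL, &on, sizeof(on));

    /* thread.prof.active: sampling toggle for the calling thread. */
    mallctl("thread.prof.active", NULL, NULL, &on, sizeof(on));

    /* thread.prof.name is read-xor-write; here we only write. */
    mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));

    return (0);
}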
src/ctl.c (73 lines changed)
@@ -7,7 +7,6 @@
 /*
  * ctl_mtx protects the following:
  * - ctl_stats.*
- * - opt_prof_active
  */
 static malloc_mutex_t ctl_mtx;
 static bool ctl_initialized;
@@ -104,6 +103,7 @@ CTL_PROTO(opt_lg_tcache_max)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
 CTL_PROTO(opt_lg_prof_sample)
 CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
@@ -131,6 +131,7 @@ CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
 CTL_PROTO(arenas_nlruns)
 CTL_PROTO(arenas_extend)
+CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
 CTL_PROTO(prof_reset)
@@ -253,6 +254,7 @@ static const ctl_named_node_t opt_node[] = {
     {NAME("prof"), CTL(opt_prof)},
     {NAME("prof_prefix"), CTL(opt_prof_prefix)},
     {NAME("prof_active"), CTL(opt_prof_active)},
+    {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
     {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
     {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
     {NAME("prof_gdump"), CTL(opt_prof_gdump)},
@@ -318,6 +320,7 @@ static const ctl_named_node_t arenas_node[] = {
 };
 
 static const ctl_named_node_t prof_node[] = {
+    {NAME("thread_active_init"), CTL(prof_thread_active_init)},
     {NAME("active"), CTL(prof_active)},
     {NAME("dump"), CTL(prof_dump)},
     {NAME("reset"), CTL(prof_reset)},
@@ -979,6 +982,14 @@ ctl_postfork_child(void)
     } \
 } while (0)
 
+#define READ_XOR_WRITE() do { \
+    if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+        newlen != 0)) { \
+        ret = EPERM; \
+        goto label_return; \
+    } \
+} while (0)
+
 #define READ(v, t) do { \
     if (oldp != NULL && oldlenp != NULL) { \
         if (*oldlenp != sizeof(t)) { \
@@ -1208,7 +1219,9 @@ CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
 CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+    opt_prof_thread_active_init, bool)
 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1332,12 +1345,12 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen)
 {
     int ret;
-    const char *oldname;
 
     if (!config_prof)
         return (ENOENT);
 
-    oldname = prof_thread_name_get();
+    READ_XOR_WRITE();
+
     if (newp != NULL) {
         tsd_t *tsd;
 
@@ -1352,12 +1365,13 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
             goto label_return;
         }
 
-        if (prof_thread_name_set(tsd, *(const char **)newp)) {
-            ret = EAGAIN;
+        if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+            0)
             goto label_return;
-        }
+    } else {
+        const char *oldname = prof_thread_name_get();
+        READ(oldname, const char *);
     }
-    READ(oldname, const char *);
 
     ret = 0;
 label_return:
@@ -1660,6 +1674,31 @@ label_return:
 
 /******************************************************************************/
 
+static int
+prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+    int ret;
+    bool oldval;
+
+    if (!config_prof)
+        return (ENOENT);
+
+    if (newp != NULL) {
+        if (newlen != sizeof(bool)) {
+            ret = EINVAL;
+            goto label_return;
+        }
+        oldval = prof_thread_active_init_set(*(bool *)newp);
+    } else
+        oldval = prof_thread_active_init_get();
+    READ(oldval, bool);
+
+    ret = 0;
+label_return:
+    return (ret);
+}
+
 static int
 prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
@@ -1670,22 +1709,18 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     if (!config_prof)
         return (ENOENT);
 
-    malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
-    oldval = opt_prof_active;
     if (newp != NULL) {
-        /*
-         * The memory barriers will tend to make opt_prof_active
-         * propagate faster on systems with weak memory ordering.
-         */
-        mb_write();
-        WRITE(opt_prof_active, bool);
-        mb_write();
-    }
+        if (newlen != sizeof(bool)) {
+            ret = EINVAL;
+            goto label_return;
+        }
+        oldval = prof_active_set(*(bool *)newp);
+    } else
+        oldval = prof_active_get();
     READ(oldval, bool);
 
     ret = 0;
 label_return:
-    malloc_mutex_unlock(&ctl_mtx);
     return (ret);
 }
 
src/jemalloc.c
@@ -655,6 +655,8 @@ malloc_conf_init(void)
             "prof_prefix", "jeprof")
         CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
             true)
+        CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+            "prof_thread_active_init", true)
         CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
             "lg_prof_sample", 0,
             (sizeof(uint64_t) << 3) - 1, true)
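With that conf handler in place, the new option can be set at startup like any other jemalloc option, for example via the environment (illustrative values, not taken from the commit):

MALLOC_CONF="prof:true,prof_active:false,prof_thread_active_init:false" ./app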
src/prof.c (142 lines changed)
@@ -16,6 +16,7 @@
 
 bool opt_prof = false;
 bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
 size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
 ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool opt_prof_gdump = false;
@@ -29,6 +30,20 @@ char opt_prof_prefix[
 #endif
     1];
 
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
 uint64_t prof_interval = 0;
 
 size_t lg_prof_sample;
@@ -103,6 +118,7 @@ static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
 static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
 static bool prof_tdata_should_destroy(prof_tdata_t *tdata);
 static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata);
+static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
 
 /******************************************************************************/
 /* Red-black trees. */
@@ -1593,7 +1609,8 @@ prof_thr_uid_alloc(void)
 }
 
 static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+    char *thread_name, bool active)
 {
     prof_tdata_t *tdata;
 
@@ -1607,7 +1624,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
     tdata->lock = prof_tdata_mutex_choose(thr_uid);
     tdata->thr_uid = thr_uid;
     tdata->thr_discrim = thr_discrim;
-    tdata->thread_name = NULL;
+    tdata->thread_name = thread_name;
     tdata->attached = true;
     tdata->expired = false;
 
@@ -1625,7 +1642,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
     tdata->enq_gdump = false;
 
     tdata->dumping = false;
-    tdata->active = true;
+    tdata->active = active;
 
     malloc_mutex_lock(&tdatas_mtx);
     tdata_tree_insert(&tdatas, tdata);
@@ -1638,7 +1655,8 @@ prof_tdata_t *
 prof_tdata_init(tsd_t *tsd)
 {
 
-    return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0));
+    return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
+        prof_thread_active_init_get()));
 }
 
 /* tdata->lock must be held. */
@@ -1698,9 +1716,13 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
 {
     uint64_t thr_uid = tdata->thr_uid;
     uint64_t thr_discrim = tdata->thr_discrim + 1;
+    char *thread_name = (tdata->thread_name != NULL) ?
+        prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
+    bool active = tdata->active;
 
     prof_tdata_detach(tsd, tdata);
-    return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim));
+    return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+        active));
 }
 
 static bool
@@ -1768,6 +1790,29 @@ prof_tdata_cleanup(tsd_t *tsd)
         prof_tdata_detach(tsd, tdata);
 }
 
+bool
+prof_active_get(void)
+{
+    bool prof_active_current;
+
+    malloc_mutex_lock(&prof_active_mtx);
+    prof_active_current = prof_active;
+    malloc_mutex_unlock(&prof_active_mtx);
+    return (prof_active_current);
+}
+
+bool
+prof_active_set(bool active)
+{
+    bool prof_active_old;
+
+    malloc_mutex_lock(&prof_active_mtx);
+    prof_active_old = prof_active;
+    prof_active = active;
+    malloc_mutex_unlock(&prof_active_mtx);
+    return (prof_active_old);
+}
+
 const char *
 prof_thread_name_get(void)
 {
@@ -1775,34 +1820,64 @@ prof_thread_name_get(void)
     prof_tdata_t *tdata;
 
     if ((tsd = tsd_tryget()) == NULL)
-        return (NULL);
+        return ("");
     tdata = prof_tdata_get(tsd, true);
     if (tdata == NULL)
-        return (NULL);
-    return (tdata->thread_name);
+        return ("");
+    return (tdata->thread_name != NULL ? tdata->thread_name : "");
 }
 
-bool
+static char *
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
+{
+    char *ret;
+    size_t size;
+
+    if (thread_name == NULL)
+        return (NULL);
+
+    size = strlen(thread_name) + 1;
+    if (size == 1)
+        return ("");
+
+    ret = imalloc(tsd, size);
+    if (ret == NULL)
+        return (NULL);
+    memcpy(ret, thread_name, size);
+    return (ret);
+}
+
+int
 prof_thread_name_set(tsd_t *tsd, const char *thread_name)
 {
     prof_tdata_t *tdata;
-    size_t size;
+    unsigned i;
     char *s;
 
     tdata = prof_tdata_get(tsd, true);
     if (tdata == NULL)
-        return (true);
+        return (EAGAIN);
 
-    size = strlen(thread_name) + 1;
-    s = imalloc(tsd, size);
+    /* Validate input. */
+    if (thread_name == NULL)
+        return (EFAULT);
+    for (i = 0; thread_name[i] != '\0'; i++) {
+        char c = thread_name[i];
+        if (!isgraph(c) && !isblank(c))
+            return (EFAULT);
+    }
+
+    s = prof_thread_name_alloc(tsd, thread_name);
     if (s == NULL)
-        return (true);
+        return (EAGAIN);
 
-    memcpy(s, thread_name, size);
-    if (tdata->thread_name != NULL)
+    if (tdata->thread_name != NULL) {
         idalloc(tsd, tdata->thread_name);
-    tdata->thread_name = s;
-    return (false);
+        tdata->thread_name = NULL;
+    }
+    if (strlen(s) > 0)
+        tdata->thread_name = s;
+    return (0);
 }
 
 bool
@@ -1834,6 +1909,29 @@ prof_thread_active_set(bool active)
     return (false);
 }
 
+bool
+prof_thread_active_init_get(void)
+{
+    bool active_init;
+
+    malloc_mutex_lock(&prof_thread_active_init_mtx);
+    active_init = prof_thread_active_init;
+    malloc_mutex_unlock(&prof_thread_active_init_mtx);
+    return (active_init);
+}
+
+bool
+prof_thread_active_init_set(bool active_init)
+{
+    bool active_init_old;
+
+    malloc_mutex_lock(&prof_thread_active_init_mtx);
+    active_init_old = prof_thread_active_init;
+    prof_thread_active_init = active_init;
+    malloc_mutex_unlock(&prof_thread_active_init_mtx);
+    return (active_init_old);
+}
+
 void
 prof_boot0(void)
 {
@@ -1882,6 +1980,14 @@ prof_boot2(void)
 
         lg_prof_sample = opt_lg_prof_sample;
 
+        prof_active = opt_prof_active;
+        if (malloc_mutex_init(&prof_active_mtx))
+            return (true);
+
+        prof_thread_active_init = opt_prof_thread_active_init;
+        if (malloc_mutex_init(&prof_thread_active_init_mtx))
+            return (true);
+
         if ((tsd = tsd_tryget()) == NULL)
             return (true);
         if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
src/stats.c (33 lines changed)
@@ -336,7 +336,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     malloc_cprintf(write_cb, cbopaque,
         "___ Begin jemalloc statistics ___\n");
     if (general) {
-        int err;
         const char *cpv;
         bool bv;
         unsigned uv;
@@ -355,26 +354,31 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
         bv ? "enabled" : "disabled");
 
 #define OPT_WRITE_BOOL(n) \
-    if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
-        == 0) { \
+    if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
         malloc_cprintf(write_cb, cbopaque, \
             "  opt."#n": %s\n", bv ? "true" : "false"); \
     }
+#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
+    bool bv2; \
+    if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
+        je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
+        malloc_cprintf(write_cb, cbopaque, \
+            "  opt."#n": %s ("#m": %s)\n", bv ? "true" \
+            : "false", bv2 ? "true" : "false"); \
+    } \
+}
 #define OPT_WRITE_SIZE_T(n) \
-    if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
-        == 0) { \
+    if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
         malloc_cprintf(write_cb, cbopaque, \
             "  opt."#n": %zu\n", sv); \
     }
 #define OPT_WRITE_SSIZE_T(n) \
-    if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
-        == 0) { \
+    if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
         malloc_cprintf(write_cb, cbopaque, \
             "  opt."#n": %zd\n", ssv); \
     }
 #define OPT_WRITE_CHAR_P(n) \
-    if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
-        == 0) { \
+    if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
         malloc_cprintf(write_cb, cbopaque, \
             "  opt."#n": \"%s\"\n", cpv); \
     }
@@ -398,7 +402,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     OPT_WRITE_SSIZE_T(lg_tcache_max)
     OPT_WRITE_BOOL(prof)
     OPT_WRITE_CHAR_P(prof_prefix)
-    OPT_WRITE_BOOL(prof_active)
+    OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
+    OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
+        prof.thread_active_init)
     OPT_WRITE_SSIZE_T(lg_prof_sample)
     OPT_WRITE_BOOL(prof_accum)
     OPT_WRITE_SSIZE_T(lg_prof_interval)
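With the defaults above, the general section of the stats output now reports each option alongside its current runtime counterpart, along the lines of (illustrative output, not from the commit):

  opt.prof_active: true (prof.active: false)
  opt.prof_thread_active_init: true (prof.thread_active_init: true)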
@@ -407,6 +413,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     OPT_WRITE_BOOL(prof_leak)
 
 #undef OPT_WRITE_BOOL
+#undef OPT_WRITE_BOOL_MUTABLE
 #undef OPT_WRITE_SIZE_T
 #undef OPT_WRITE_SSIZE_T
 #undef OPT_WRITE_CHAR_P
@@ -434,13 +441,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
         malloc_cprintf(write_cb, cbopaque,
             "Min active:dirty page ratio per arena: N/A\n");
     }
-    if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
-        == 0) {
+    if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
         malloc_cprintf(write_cb, cbopaque,
             "Maximum thread-cached size class: %zu\n", sv);
     }
-    if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
-        bv) {
+    if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
         CTL_GET("prof.lg_sample", &sv, size_t);
         malloc_cprintf(write_cb, cbopaque,
             "Average profile sample interval: %"PRIu64