Implement/test/fix prof-related mallctls.

Implement/test/fix the opt.prof_thread_active_init,
prof.thread_active_init, and thread.prof.active mallctls.

Test/fix the thread.prof.name mallctl.

Refactor opt_prof_active to be read-only and move mutable state into the
prof_active variable.  Stop leaning on ctl-related locking for
protection.
Jason Evans  2014-10-03 23:25:30 -07:00
parent 551ebc4364
commit fc12c0b8bc
11 changed files with 545 additions and 66 deletions
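Before the per-file diffs, a minimal sketch of how the three switches interact from application code. It assumes a build configured with --enable-prof (otherwise these mallctls return ENOENT); the program is illustrative, but the mallctl names are the ones this commit implements or fixes.

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool b;
	size_t sz = sizeof(b);

	/* Read the global sampling switch. */
	if (mallctl("prof.active", &b, &sz, NULL, 0) == 0)
		printf("prof.active: %s\n", b ? "true" : "false");

	/* Make newly created threads start with sampling disabled. */
	b = false;
	mallctl("prof.thread_active_init", NULL, NULL, &b, sizeof(b));

	/*
	 * Disable sampling for the calling thread only; a thread samples
	 * only when both prof.active and thread.prof.active are true.
	 */
	mallctl("thread.prof.active", NULL, NULL, &b, sizeof(b));

	return (0);
}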


@@ -123,9 +123,11 @@ TESTS_UNIT := $(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
$(srcroot)test/unit/prof_accum.c \
$(srcroot)test/unit/prof_active.c \
$(srcroot)test/unit/prof_gdump.c \
$(srcroot)test/unit/prof_idump.c \
$(srcroot)test/unit/prof_reset.c \
$(srcroot)test/unit/prof_thread_name.c \
$(srcroot)test/unit/ql.c \
$(srcroot)test/unit/qr.c \
$(srcroot)test/unit/quarantine.c \


@@ -1061,6 +1061,21 @@ malloc_conf = "xmalloc:true";]]></programlisting>
This option is enabled by default.</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_thread_active_init">
<term>
<mallctl>opt.prof_thread_active_init</mallctl>
(<type>bool</type>)
<literal>r-</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Initial setting for <link
linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
in newly created threads. The initial setting for newly created threads
can also be changed during execution via the <link
linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
mallctl. This option is enabled by default.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_prof_sample">
<term>
<mallctl>opt.lg_prof_sample</mallctl>
@@ -1264,7 +1279,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<term>
<mallctl>thread.prof.name</mallctl>
(<type>const char *</type>)
<literal>rw</literal>
<literal>r-</literal> or
<literal>-w</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Get/set the descriptive name associated with the calling
@@ -1272,7 +1288,15 @@ malloc_conf = "xmalloc:true";]]></programlisting>
created, so the input string need not be maintained after this interface
completes execution. The output string of this interface should be
copied for non-ephemeral uses, because multiple implementation details
can cause asynchronous string deallocation.</para></listitem>
can cause asynchronous string deallocation. Furthermore, each
invocation of this interface can only read or write; simultaneous
read/write is not supported due to string lifetime limitations. The
name string must be nil-terminated and comprised only of characters in the
sets recognized
by <citerefentry><refentrytitle>isgraph</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> and
<citerefentry><refentrytitle>isblank</refentrytitle>
<manvolnum>3</manvolnum></citerefentry>.</para></listitem>
</varlistentry>
<varlistentry id="thread.prof.active">
@@ -1283,7 +1307,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
[<option>--enable-prof</option>]
</term>
<listitem><para>Control whether sampling is currently active for the
calling thread. This is a deactivation mechanism in addition to <link
calling thread. This is an activation mechanism in addition to <link
linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
be active for the calling thread to sample. This flag is enabled by
default.</para></listitem>
@@ -1508,6 +1532,20 @@ malloc_conf = "xmalloc:true";]]></programlisting>
and returning the new arena index.</para></listitem>
</varlistentry>
<varlistentry id="prof.thread_active_init">
<term>
<mallctl>prof.thread_active_init</mallctl>
(<type>bool</type>)
<literal>rw</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Control the initial setting for <link
linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
in newly created threads. See the <link
linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
option for additional information.</para></listitem>
</varlistentry>
<varlistentry id="prof.active">
<term>
<mallctl>prof.active</mallctl>
@@ -1518,8 +1556,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Control whether sampling is currently active. See the
<link
linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
option for additional information.
</para></listitem>
option for additional information, as well as the interrelated <link
linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
mallctl.</para></listitem>
</varlistentry>
<varlistentry id="prof.dump">
@@ -1548,7 +1587,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Reset all memory profile statistics, and optionally
update the sample rate (see <link
linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
and <link linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl>).
and <link
linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
</para></listitem>
</varlistentry>
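Because thread.prof.name is now r- or -w, reads and writes must be issued as separate calls; supplying both oldp and newp in one call fails with EPERM (enforced by the READ_XOR_WRITE macro added to the ctl code below). A minimal sketch:

	const char *name = "worker 3";
	const char *cur;
	size_t sz = sizeof(cur);

	/* Write: newp only. */
	mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
	/* Read: oldp only. */
	mallctl("thread.prof.name", &cur, &sz, NULL, 0);
	/* Supplying oldp and newp together would return EPERM. */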


@@ -285,6 +285,9 @@ opt_zero
p2rz
pages_purge
pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
@@ -316,6 +319,8 @@ prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set


@@ -215,13 +215,8 @@ typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by setting
* opt_prof_active to false. No locking is used when updating opt_prof_active,
* so there are no guarantees regarding how long it will take for all threads
* to notice state changes.
*/
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
@@ -235,6 +230,9 @@ extern char opt_prof_prefix[
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
@@ -274,9 +272,13 @@ prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
@@ -290,6 +292,7 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
@@ -305,6 +308,19 @@ void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
@@ -401,8 +417,8 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
assert(usize == s2u(usize));
if (!opt_prof_active || likely(prof_sample_accum_update(tsd, usize,
update, &tdata)))
if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
usize, update, &tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
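The header now encodes the synchronization contract: writers go through prof_active_set() (mutex-protected, in the prof source below), while the allocation fast path uses prof_active_get_unlocked() and tolerates briefly stale values. In outline, the idiom looks like this (a paraphrase using pthreads, not the literal jemalloc code):

#include <pthread.h>
#include <stdbool.h>

static bool flag;	/* plays the role of prof_active */
static pthread_mutex_t flag_mtx = PTHREAD_MUTEX_INITIALIZER;

static inline bool
flag_get_unlocked(void)
{
	/* Racy read: cheap, but with no visibility guarantees. */
	return (flag);
}

static bool
flag_set(bool v)
{
	bool old;

	pthread_mutex_lock(&flag_mtx);
	old = flag;
	flag = v;
	pthread_mutex_unlock(&flag_mtx);
	return (old);	/* swap semantics: callers learn the prior state */
}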


@@ -7,7 +7,6 @@
/*
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
@@ -104,6 +103,7 @@ CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
@@ -131,6 +131,7 @@ CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_reset)
@@ -253,6 +254,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
@@ -318,6 +320,7 @@ static const ctl_named_node_t arenas_node[] = {
};
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("reset"), CTL(prof_reset)},
@@ -979,6 +982,14 @@ ctl_postfork_child(void)
} \
} while (0)
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
@@ -1208,7 +1219,9 @@ CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1332,12 +1345,12 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const char *oldname;
if (!config_prof)
return (ENOENT);
oldname = prof_thread_name_get();
READ_XOR_WRITE();
if (newp != NULL) {
tsd_t *tsd;
@@ -1352,12 +1365,13 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
if (prof_thread_name_set(tsd, *(const char **)newp)) {
ret = EAGAIN;
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
goto label_return;
}
}
} else {
const char *oldname = prof_thread_name_get();
READ(oldname, const char *);
}
ret = 0;
label_return:
@@ -1660,6 +1674,31 @@ label_return:
/******************************************************************************/
static int
prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
if (!config_prof)
return (ENOENT);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_thread_active_init_set(*(bool *)newp);
} else
oldval = prof_thread_active_init_get();
READ(oldval, bool);
ret = 0;
label_return:
return (ret);
}
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
@@ -1670,22 +1709,18 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (!config_prof)
return (ENOENT);
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
/*
* The memory barriers will tend to make opt_prof_active
* propagate faster on systems with weak memory ordering.
*/
mb_write();
WRITE(opt_prof_active, bool);
mb_write();
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_active_set(*(bool *)newp);
} else
oldval = prof_active_get();
READ(oldval, bool);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
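Note the asymmetry: thread.prof.name rejects combined read/write via READ_XOR_WRITE, while prof.active and prof.thread_active_init accept it, because prof_active_set() and prof_thread_active_init_set() return the previous value under their mutexes. A caller can therefore swap the flag and learn the prior state in one call; a sketch:

	bool new_val = false, old_val;
	size_t sz = sizeof(old_val);

	/* Disable sampling; old_val receives the previous setting. */
	mallctl("prof.active", &old_val, &sz, &new_val, sizeof(new_val));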


@@ -655,6 +655,8 @@ malloc_conf_init(void)
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
true)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1, true)
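With the option wired into malloc_conf parsing, it can be set the same way the new unit tests below do, through the global configuration string (the MALLOC_CONF environment variable works equally):

	/* Parsed once during jemalloc bootstrap. */
	const char *malloc_conf = "prof:true,prof_thread_active_init:false";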


@@ -16,6 +16,7 @@
bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
@@ -29,6 +30,20 @@ char opt_prof_prefix[
#endif
1];
/*
* Initialized as opt_prof_active, and accessed via
* prof_active_[gs]et{_unlocked,}().
*/
bool prof_active;
static malloc_mutex_t prof_active_mtx;
/*
* Initialized as opt_prof_thread_active_init, and accessed via
* prof_thread_active_init_[gs]et().
*/
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;
uint64_t prof_interval = 0;
size_t lg_prof_sample;
@@ -103,6 +118,7 @@ static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(prof_tdata_t *tdata);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata);
static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
@@ -1593,7 +1609,8 @@ prof_thr_uid_alloc(void)
}
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active)
{
prof_tdata_t *tdata;
@@ -1607,7 +1624,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
tdata->lock = prof_tdata_mutex_choose(thr_uid);
tdata->thr_uid = thr_uid;
tdata->thr_discrim = thr_discrim;
tdata->thread_name = NULL;
tdata->thread_name = thread_name;
tdata->attached = true;
tdata->expired = false;
@@ -1625,7 +1642,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
tdata->enq_gdump = false;
tdata->dumping = false;
tdata->active = true;
tdata->active = active;
malloc_mutex_lock(&tdatas_mtx);
tdata_tree_insert(&tdatas, tdata);
@@ -1638,7 +1655,8 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0));
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
prof_thread_active_init_get()));
}
/* tdata->lock must be held. */
@@ -1698,9 +1716,13 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
{
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim));
return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
active));
}
static bool
@@ -1768,6 +1790,29 @@ prof_tdata_cleanup(tsd_t *tsd)
prof_tdata_detach(tsd, tdata);
}
bool
prof_active_get(void)
{
bool prof_active_current;
malloc_mutex_lock(&prof_active_mtx);
prof_active_current = prof_active;
malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_current);
}
bool
prof_active_set(bool active)
{
bool prof_active_old;
malloc_mutex_lock(&prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_old);
}
const char *
prof_thread_name_get(void)
{
@@ -1775,34 +1820,64 @@ prof_thread_name_get(void)
prof_tdata_t *tdata;
if ((tsd = tsd_tryget()) == NULL)
return (NULL);
return ("");
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (NULL);
return (tdata->thread_name);
return ("");
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
bool
static char *
prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
char *ret;
size_t size;
if (thread_name == NULL)
return (NULL);
size = strlen(thread_name) + 1;
if (size == 1)
return ("");
ret = imalloc(tsd, size);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
return (ret);
}
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name)
{
prof_tdata_t *tdata;
size_t size;
unsigned i;
char *s;
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
return (EAGAIN);
size = strlen(thread_name) + 1;
s = imalloc(tsd, size);
/* Validate input. */
if (thread_name == NULL)
return (EFAULT);
for (i = 0; thread_name[i] != '\0'; i++) {
char c = thread_name[i];
if (!isgraph(c) && !isblank(c))
return (EFAULT);
}
s = prof_thread_name_alloc(tsd, thread_name);
if (s == NULL)
return (true);
return (EAGAIN);
memcpy(s, thread_name, size);
if (tdata->thread_name != NULL)
if (tdata->thread_name != NULL) {
idalloc(tsd, tdata->thread_name);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
tdata->thread_name = s;
return (false);
return (0);
}
bool
@@ -1834,6 +1909,29 @@ prof_thread_active_set(bool active)
return (false);
}
bool
prof_thread_active_init_get(void)
{
bool active_init;
malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init);
}
bool
prof_thread_active_init_set(bool active_init)
{
bool active_init_old;
malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init_old);
}
void
prof_boot0(void)
{
@@ -1882,6 +1980,14 @@ prof_boot2(void)
lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx))
return (true);
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
if ((tsd = tsd_tryget()) == NULL)
return (true);
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
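The validation loop in prof_thread_name_set() accepts exactly the characters recognized by isgraph(3) and isblank(3) (so spaces and tabs are fine) and rejects everything else, including NULL input, with EFAULT. For example (illustrative calls, assuming a valid tsd and tdata):

	prof_thread_name_set(tsd, "worker 3");	/* 0: graphic chars + space */
	prof_thread_name_set(tsd, "hi\nthere");	/* EFAULT: '\n' fails both tests */
	prof_thread_name_set(tsd, NULL);	/* EFAULT: NULL input */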


@@ -336,7 +336,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
bool bv;
unsigned uv;
@@ -355,26 +354,31 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
bool bv2; \
if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s ("#m": %s)\n", bv ? "true" \
: "false", bv2 ? "true" : "false"); \
} \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
@@ -398,7 +402,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
prof.thread_active_init)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
@@ -407,6 +413,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
@@ -434,13 +441,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
== 0) {
if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) {
if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"Average profile sample interval: %"PRIu64

test/unit/prof_active.c (new file, 136 lines)

@@ -0,0 +1,136 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_thread_active_init:false,lg_prof_sample:0,prof_final:false";
#endif
static void
mallctl_bool_get(const char *name, bool expected, const char *func, int line)
{
bool old;
size_t sz;
sz = sizeof(old);
assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading %s", func, line, name);
assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
name);
}
static void
mallctl_bool_set(const char *name, bool old_expected, bool val_new,
const char *func, int line)
{
bool old;
size_t sz;
sz = sizeof(old);
assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
"%s():%d: Unexpected mallctl failure reading/writing %s", func,
line, name);
assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
line, name);
}
static void
mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
int line)
{
mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
}
#define mallctl_prof_active_get(a) \
mallctl_prof_active_get_impl(a, __func__, __LINE__)
static void
mallctl_prof_active_set_impl(bool prof_active_old_expected,
bool prof_active_new, const char *func, int line)
{
mallctl_bool_set("prof.active", prof_active_old_expected,
prof_active_new, func, line);
}
#define mallctl_prof_active_set(a, b) \
mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
static void
mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
const char *func, int line)
{
mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
func, line);
}
#define mallctl_thread_prof_active_get(a) \
mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
static void
mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
bool thread_prof_active_new, const char *func, int line)
{
mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
thread_prof_active_new, func, line);
}
#define mallctl_thread_prof_active_set(a, b) \
mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
static void
prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
{
void *p;
size_t expected_backtraces = expect_sample ? 1 : 0;
assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
line);
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_zu_eq(prof_bt_count(), expected_backtraces,
"%s():%d: Unexpected backtrace count", func, line);
dallocx(p, 0);
}
#define prof_sampling_probe(a) \
prof_sampling_probe_impl(a, __func__, __LINE__)
TEST_BEGIN(test_prof_active)
{
test_skip_if(!config_prof);
mallctl_prof_active_get(true);
mallctl_thread_prof_active_get(false);
mallctl_prof_active_set(true, true);
mallctl_thread_prof_active_set(false, false);
/* prof.active, !thread.prof.active. */
prof_sampling_probe(false);
mallctl_prof_active_set(true, false);
mallctl_thread_prof_active_set(false, false);
/* !prof.active, !thread.prof.active. */
prof_sampling_probe(false);
mallctl_prof_active_set(false, false);
mallctl_thread_prof_active_set(false, true);
/* !prof.active, thread.prof.active. */
prof_sampling_probe(false);
mallctl_prof_active_set(false, true);
mallctl_thread_prof_active_set(true, true);
/* prof.active, thread.prof.active. */
prof_sampling_probe(true);
/* Restore settings. */
mallctl_prof_active_set(true, true);
mallctl_thread_prof_active_set(true, false);
}
TEST_END
int
main(void)
{
return (test(
test_prof_active));
}


@@ -22,6 +22,8 @@ TEST_BEGIN(test_prof_reset_basic)
size_t sz;
unsigned i;
test_skip_if(!config_prof);
sz = sizeof(size_t);
assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
NULL, 0), 0,
@@ -90,6 +92,8 @@ TEST_BEGIN(test_prof_reset_cleanup)
void *p;
prof_dump_header_t *prof_dump_header_orig;
test_skip_if(!config_prof);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while activating profiling");

test/unit/prof_thread_name.c (new file, 128 lines)

@@ -0,0 +1,128 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_active:false,prof_final:false";
#endif
static void
mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
int line)
{
const char *thread_name_old;
size_t sz;
sz = sizeof(thread_name_old);
assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
assert_str_eq(thread_name_old, thread_name_expected,
"%s():%d: Unexpected thread.prof.name value", func, line);
}
#define mallctl_thread_name_get(a) \
mallctl_thread_name_get_impl(a, __func__, __LINE__)
static void
mallctl_thread_name_set_impl(const char *thread_name, const char *func,
int line)
{
assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
sizeof(thread_name)), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
mallctl_thread_name_get_impl(thread_name, func, line);
}
#define mallctl_thread_name_set(a) \
mallctl_thread_name_set_impl(a, __func__, __LINE__)
TEST_BEGIN(test_prof_thread_name_validation)
{
const char *thread_name;
mallctl_thread_name_get("");
mallctl_thread_name_set("hi there");
/* NULL input shouldn't be allowed. */
thread_name = NULL;
assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
/* '\n' shouldn't be allowed. */
thread_name = "hi\nthere";
assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
/* Simultaneous read/write shouldn't be allowed. */
{
const char *thread_name_old;
size_t sz;
sz = sizeof(thread_name_old);
assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
&thread_name, sizeof(thread_name)), EPERM,
"Unexpected mallctl result writing \"%s\" to "
"thread.prof.name", thread_name);
}
mallctl_thread_name_set("");
}
TEST_END
#define NTHREADS 4
#define NRESET 25
static void *
thd_start(void *varg)
{
unsigned thd_ind = *(unsigned *)varg;
char thread_name[16] = "";
unsigned i;
malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
mallctl_thread_name_get("");
mallctl_thread_name_set(thread_name);
for (i = 0; i < NRESET; i++) {
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected error while resetting heap profile data");
mallctl_thread_name_get(thread_name);
}
mallctl_thread_name_set(thread_name);
mallctl_thread_name_set("");
return (NULL);
}
TEST_BEGIN(test_prof_thread_name_threaded)
{
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
unsigned i;
test_skip_if(!config_prof);
for (i = 0; i < NTHREADS; i++) {
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
}
for (i = 0; i < NTHREADS; i++)
thd_join(thds[i], NULL);
}
TEST_END
#undef NTHREADS
#undef NRESET
int
main(void)
{
return (test(
test_prof_thread_name_validation,
test_prof_thread_name_threaded));
}