Implement the prof.gdump mallctl.

This feature makes it possible to toggle the gdump feature on/off during
program execution, whereas the opt.prof_gdump option can only be set
during program startup.

This resolves #72.
Jason Evans 2015-01-25 21:16:57 -08:00
parent 41f2e692f6
commit 5b8ed5b7c9
7 changed files with 133 additions and 10 deletions
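
For orientation, a minimal usage sketch of the new control (not part of this
diff; it assumes jemalloc built with --enable-prof and a process started with
MALLOC_CONF="prof:true"):

/* Illustrative only: toggle gdump at runtime via the new prof.gdump mallctl. */
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool enable = true;
	bool old;
	size_t sz = sizeof(old);

	/* Enable gdump; the previous setting is returned through oldp. */
	if (mallctl("prof.gdump", &old, &sz, &enable, sizeof(enable)) != 0) {
		fprintf(stderr, "prof.gdump unavailable\n");
		return (1);
	}

	/* ... allocation-heavy phase worth capturing dumps for ... */

	/* Disable again; pass NULL oldp/oldlenp to skip reading back. */
	enable = false;
	mallctl("prof.gdump", NULL, NULL, &enable, sizeof(enable));
	return (0);
}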

doc/jemalloc.xml.in

@@ -1215,13 +1215,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <literal>r-</literal>
         [<option>--enable-prof</option>]
       </term>
-      <listitem><para>Trigger a memory profile dump every time the total
-      virtual memory exceeds the previous maximum. Profiles are dumped to
-      files named according to the pattern
-      <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
-      where <literal>&lt;prefix&gt;</literal> is controlled by the <link
-      linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-      option. This option is disabled by default.</para></listitem>
+      <listitem><para>Set the initial state of <link
+      linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+      enabled triggers a memory profile dump every time the total virtual
+      memory exceeds the previous maximum. This option is disabled by
+      default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.prof_final">
@@ -1687,6 +1685,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         option.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="prof.gdump">
+        <term>
+          <mallctl>prof.gdump</mallctl>
+          (<type>bool</type>)
+          <literal>rw</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>When enabled, trigger a memory profile dump every time
+        the total virtual memory exceeds the previous maximum. Profiles are
+        dumped to files named according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="prof.reset">
         <term>
           <mallctl>prof.reset</mallctl>

include/jemalloc/internal/private_symbols.txt

@@ -329,6 +329,10 @@ prof_dump_open
 prof_free
 prof_free_sampled_object
 prof_gdump
+prof_gdump_get
+prof_gdump_get_unlocked
+prof_gdump_set
+prof_gdump_val
 prof_idump
 prof_interval
 prof_lookup

include/jemalloc/internal/prof.h

@@ -239,6 +239,9 @@ extern char opt_prof_prefix[
 /* Accessed via prof_active_[gs]et{_unlocked,}(). */
 extern bool prof_active;
 
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool prof_gdump_val;
+
 /*
  * Profile dump interval, measured in bytes allocated. Each arena triggers a
  * profile dump when it reaches this threshold. The effect is that the
@@ -285,6 +288,8 @@ bool prof_thread_active_get(void);
 bool prof_thread_active_set(bool active);
 bool prof_thread_active_init_get(void);
 bool prof_thread_active_init_set(bool active_init);
+bool prof_gdump_get(void);
+bool prof_gdump_set(bool active);
 void prof_boot0(void);
 void prof_boot1(void);
 bool prof_boot2(void);
@@ -299,6 +304,7 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
 #ifndef JEMALLOC_ENABLE_INLINE
 bool prof_active_get_unlocked(void);
+bool prof_gdump_get_unlocked(void);
 prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
 bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
     prof_tdata_t **tdata_out);
@@ -327,6 +333,18 @@ prof_active_get_unlocked(void)
 	return (prof_active);
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void)
+{
+
+	/*
+	 * No locking is used when reading prof_gdump_val in the fast path, so
+	 * there are no guarantees regarding how long it will take for all
+	 * threads to notice state changes.
+	 */
+	return (prof_gdump_val);
+}
+
 JEMALLOC_ALWAYS_INLINE prof_tdata_t *
 prof_tdata_get(tsd_t *tsd, bool create)
 {

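The unlocked fast-path read above is a deliberately benign race: writers
serialize on prof_gdump_mtx, while readers may observe a toggle late. For
illustration only (jemalloc does not use <stdatomic.h> here), the same
read-mostly-flag pattern expressed in C11 atomics would look like:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool gdump_flag;	/* stands in for prof_gdump_val */

/* Writer: an atomic exchange returns a consistent previous value. */
static bool
gdump_set(bool val)
{
	return (atomic_exchange_explicit(&gdump_flag, val,
	    memory_order_relaxed));
}

/* Fast-path reader: a relaxed load; staleness only delays or adds one
 * profile dump around a toggle, which is acceptable for this flag. */
static inline bool
gdump_get_unlocked(void)
{
	return (atomic_load_explicit(&gdump_flag, memory_order_relaxed));
}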
src/chunk.c

@@ -213,7 +213,8 @@ chunk_register(void *chunk, size_t size, bool base)
 		} else if (config_prof)
 			gdump = false;
 		malloc_mutex_unlock(&chunks_mtx);
-		if (config_prof && opt_prof && opt_prof_gdump && gdump)
+		if (config_prof && opt_prof && prof_gdump_get_unlocked() &&
+		    gdump)
 			prof_gdump();
 	}
 	if (config_valgrind)

src/ctl.c

@@ -137,6 +137,7 @@ CTL_PROTO(arenas_extend)
 CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
 CTL_PROTO(prof_reset)
 CTL_PROTO(prof_interval)
 CTL_PROTO(lg_prof_sample)
@@ -347,6 +348,7 @@ static const ctl_named_node_t prof_node[] = {
 	{NAME("thread_active_init"),	CTL(prof_thread_active_init)},
 	{NAME("active"),	CTL(prof_active)},
 	{NAME("dump"),		CTL(prof_dump)},
+	{NAME("gdump"),		CTL(prof_gdump)},
 	{NAME("reset"),		CTL(prof_reset)},
 	{NAME("interval"),	CTL(prof_interval)},
 	{NAME("lg_sample"),	CTL(lg_prof_sample)}
@@ -1790,6 +1792,31 @@ label_return:
 	return (ret);
 }
 
+static int
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	bool oldval;
+
+	if (!config_prof)
+		return (ENOENT);
+
+	if (newp != NULL) {
+		if (newlen != sizeof(bool)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		oldval = prof_gdump_set(*(bool *)newp);
+	} else
+		oldval = prof_gdump_get();
+	READ(oldval, bool);
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
 static int
 prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)

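Because the handler is registered under prof_node, the control is also
reachable through the MIB interface; a sketch using the standard jemalloc
API (the wrapper function itself is hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Toggle prof.gdump repeatedly without re-resolving the name each time. */
static void
gdump_toggle(bool enable)
{
	size_t mib[2];	/* "prof.gdump" has two name components. */
	size_t miblen = sizeof(mib) / sizeof(mib[0]);

	if (mallctlnametomib("prof.gdump", mib, &miblen) == 0) {
		mallctlbymib(mib, miblen, NULL, NULL, &enable,
		    sizeof(enable));
	}
}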
src/prof.c

@@ -44,6 +44,13 @@ static malloc_mutex_t prof_active_mtx;
 static bool prof_thread_active_init;
 static malloc_mutex_t prof_thread_active_init_mtx;
 
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
 uint64_t prof_interval = 0;
 
 size_t lg_prof_sample;
@@ -1961,6 +1968,29 @@ prof_thread_active_init_set(bool active_init)
 	return (active_init_old);
 }
 
+bool
+prof_gdump_get(void)
+{
+	bool prof_gdump_current;
+
+	malloc_mutex_lock(&prof_gdump_mtx);
+	prof_gdump_current = prof_gdump_val;
+	malloc_mutex_unlock(&prof_gdump_mtx);
+	return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(bool gdump)
+{
+	bool prof_gdump_old;
+
+	malloc_mutex_lock(&prof_gdump_mtx);
+	prof_gdump_old = prof_gdump_val;
+	prof_gdump_val = gdump;
+	malloc_mutex_unlock(&prof_gdump_mtx);
+	return (prof_gdump_old);
+}
+
 void
 prof_boot0(void)
 {
@@ -2013,6 +2043,10 @@ prof_boot2(void)
 		if (malloc_mutex_init(&prof_active_mtx))
 			return (true);
 
+		prof_gdump_val = opt_prof_gdump;
+		if (malloc_mutex_init(&prof_gdump_mtx))
+			return (true);
+
 		prof_thread_active_init = opt_prof_thread_active_init;
 		if (malloc_mutex_init(&prof_thread_active_init_mtx))
 			return (true);

test/unit/prof_gdump.c

@@ -21,8 +21,9 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
 TEST_BEGIN(test_gdump)
 {
-	bool active;
-	void *p, *q;
+	bool active, gdump, gdump_old;
+	void *p, *q, *r, *s;
+	size_t sz;
 
 	test_skip_if(!config_prof);
@@ -42,8 +43,32 @@ TEST_BEGIN(test_gdump)
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
+	gdump = false;
+	sz = sizeof(gdump_old);
+	assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+	    sizeof(gdump)), 0,
+	    "Unexpected mallctl failure while disabling prof.gdump");
+	assert(gdump_old);
+	did_prof_dump_open = false;
+	r = mallocx(chunksize, 0);
+	assert_ptr_not_null(r, "Unexpected mallocx() failure");
+	assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+	gdump = true;
+	sz = sizeof(gdump_old);
+	assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+	    sizeof(gdump)), 0,
+	    "Unexpected mallctl failure while enabling prof.gdump");
+	assert(!gdump_old);
+	did_prof_dump_open = false;
+	s = mallocx(chunksize, 0);
+	assert_ptr_not_null(s, "Unexpected mallocx() failure");
+	assert_true(did_prof_dump_open, "Expected a profile dump");
+
 	dallocx(p, 0);
 	dallocx(q, 0);
+	dallocx(r, 0);
+	dallocx(s, 0);
 }
 TEST_END