Add "thread.idle" mallctl.

This can encapsulate various internal cleanup logic, and can be used to free up
resources before a long sleep.
David Goldblatt 2020-01-22 11:13:26 -08:00 committed by David Goldblatt
parent f81341a48b
commit 6a622867ca
3 changed files with 137 additions and 2 deletions


@ -1654,6 +1654,28 @@ malloc_conf = "xmalloc:true";]]></programlisting>
default.</para></listitem>
</varlistentry>
<varlistentry id="thread.idle">
<term>
<mallctl>thread.idle</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Hints to jemalloc that the calling thread will be idle
for some nontrivial period of time (say, on the order of seconds), and
that doing some cleanup operations may be beneficial. There are no
guarantees as to what specific operations will be performed; currently
this flushes the caller's tcache and may (according to some heuristic)
purge its associated arena.</para>
<para>This is not intended to be a general-purpose background activity
mechanism, and threads should not wake up multiple times solely to call
it. Rather, a thread waiting for a task should do a timed wait first,
call <link linkend="thread.idle"><mallctl>thread.idle</mallctl></link> if
no task appears in the timeout interval, and then do an untimed wait.
For such a background activity mechanism, see
<link linkend="background_thread"><mallctl>background_thread</mallctl></link>.
</para></listitem>
</varlistentry>
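
The waiting pattern described in the manual entry above can be sketched from the caller's side. This is an illustration, not part of the diff; the queue flag, mutex, condition variable, and one-second timeout are hypothetical, and only the mallctl("thread.idle", ...) call comes from this commit.

#include <errno.h>
#include <pthread.h>
#include <time.h>
#include <jemalloc/jemalloc.h>

static void
wait_for_work(pthread_mutex_t *mtx, pthread_cond_t *cv, int *work_ready) {
	pthread_mutex_lock(mtx);

	/* Step 1: a timed wait (here, one second). */
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;
	int timed_out = 0;
	while (!*work_ready && !timed_out) {
		timed_out = (pthread_cond_timedwait(cv, mtx, &deadline)
		    == ETIMEDOUT);
	}

	if (!*work_ready) {
		/*
		 * Step 2: no task appeared within the timeout interval, so
		 * hint to jemalloc that this thread is about to go idle.
		 */
		mallctl("thread.idle", NULL, NULL, NULL, 0);
		/* Step 3: block without a timeout until work arrives. */
		while (!*work_ready) {
			pthread_cond_wait(cv, mtx);
		}
	}

	*work_ready = 0;
	pthread_mutex_unlock(mtx);
}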
<varlistentry id="tcache.create">
<term>
<mallctl>tcache.create</mallctl>


@ -68,6 +68,7 @@ CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
@ -293,7 +294,8 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
{NAME("prof"), CHILD(named, thread_prof)}
{NAME("prof"), CHILD(named, thread_prof)},
{NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
@ -1900,6 +1902,12 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
/*
* Slightly counterintuitively, READONLY() really just requires that the
* call isn't trying to write, and WRITEONLY() just requires that it
* isn't trying to read; hence, adding both requires that the operation
* is neither a read nor a write.
*/
READONLY();
WRITEONLY();
@ -1971,6 +1979,41 @@ label_return:
return ret;
}
static int
thread_idle_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
/* See the comment in thread_tcache_flush_ctl. */
READONLY();
WRITEONLY();
if (tcache_available(tsd)) {
tcache_flush(tsd);
}
/*
* This heuristic is perhaps not the most well-considered. But it
* matches the only idling policy we have experience with in the status
* quo. Over time we should investigate more principled approaches.
*/
if (opt_narenas > ncpus * 2) {
arena_t *arena = arena_choose(tsd, NULL);
if (arena != NULL) {
arena_decay(tsd_tsdn(tsd), arena, false, true);
}
/*
* The missing arena case is not actually an error; a thread
* might be idle before it associates itself to one. This is
* unusual, but not wrong.
*/
}
ret = 0;
label_return:
return ret;
}
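
For illustration (not part of the diff), here is what the combined READONLY()/WRITEONLY() checks in thread_idle_ctl look like from the caller's side. The EPERM results assume the standard behavior of jemalloc's READONLY() and WRITEONLY() macros, which reject calls that supply a new or an old value respectively; the test program itself is hypothetical.

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/*
	 * The "pure action" form, with neither an old nor a new value, is
	 * the only invocation these checks accept.
	 */
	assert(mallctl("thread.idle", NULL, NULL, NULL, 0) == 0);

	/* Supplying an old-value buffer trips WRITEONLY()... */
	size_t old = 0;
	size_t sz = sizeof(old);
	assert(mallctl("thread.idle", &old, &sz, NULL, 0) == EPERM);

	/* ...and supplying a new value trips READONLY(). */
	bool hint = true;
	assert(mallctl("thread.idle", NULL, NULL, &hint, sizeof(hint)) == EPERM);

	return 0;
}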
/******************************************************************************/
static int


@ -882,6 +882,75 @@ TEST_BEGIN(test_hooks_exhaustion) {
}
TEST_END
TEST_BEGIN(test_thread_idle) {
/*
* We're cheating a little bit in this test, and inferring things about
* implementation internals (like tcache details). We have to;
* thread.idle has no guaranteed effects. We need stats to make these
* inferences.
*/
test_skip_if(!config_stats);
int err;
size_t sz;
size_t miblen;
bool tcache_enabled = false;
sz = sizeof(tcache_enabled);
err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
assert_d_eq(err, 0, "");
test_skip_if(!tcache_enabled);
size_t tcache_max;
sz = sizeof(tcache_max);
err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
assert_d_eq(err, 0, "");
test_skip_if(tcache_max == 0);
unsigned arena_ind;
sz = sizeof(arena_ind);
err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
assert_d_eq(err, 0, "");
/* We're going to do an allocation of size 1, which we know is small. */
size_t mib[5];
miblen = sizeof(mib)/sizeof(mib[0]);
err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
assert_d_eq(err, 0, "");
mib[2] = arena_ind;
/*
* This alloc and dalloc should leave something in the tcache, in a
* small size's cache bin.
*/
void *ptr = mallocx(1, 0);
dallocx(ptr, 0);
uint64_t epoch;
err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
assert_d_eq(err, 0, "");
uint64_t small_dalloc_pre_idle;
sz = sizeof(small_dalloc_pre_idle);
err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
assert_d_eq(err, 0, "");
err = mallctl("thread.idle", NULL, NULL, NULL, 0);
assert_d_eq(err, 0, "");
err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
assert_d_eq(err, 0, "");
uint64_t small_dalloc_post_idle;
sz = sizeof(small_dalloc_post_idle);
err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
assert_d_eq(err, 0, "");
assert_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
"Purge didn't flush the tcache");
}
TEST_END
int
main(void) {
return test(
@ -913,5 +982,6 @@ main(void) {
test_prof_active,
test_stats_arenas,
test_hooks,
test_hooks_exhaustion);
test_hooks_exhaustion,
test_thread_idle);
}