Implement the arena.<i>.reset mallctl.

This makes it possible to discard all of an arena's allocations in a
single operation.

This resolves #146.
Jason Evans 2016-04-22 14:37:17 -07:00
parent 66cd953514
commit 19ff2cefba
8 changed files with 411 additions and 39 deletions
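For context, a minimal usage sketch of the new interface (not part of the diff), assuming a jemalloc 4.x build where the existing arenas.extend mallctl and the arena.<i>.reset mallctl added by this commit are available: create a dedicated arena, allocate into it, then discard everything in one call.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char cmd[64];
	void *p;

	/* Create a dedicated arena; arena.<i>.reset only works on such arenas. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);

	/* Allocate into that arena, bypassing the thread cache. */
	p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	if (p == NULL)
		return (1);

	/* Discard all of the arena's extant allocations in one operation. */
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", arena_ind);
	if (mallctl(cmd, NULL, NULL, NULL, 0) != 0)
		return (1);

	/* p (and any other allocation from the arena) must not be used now. */
	printf("arena %u reset\n", arena_ind);
	return (0);
}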


@@ -135,7 +135,9 @@ C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
TESTS_UNIT := $(srcroot)test/unit/atomic.c \
TESTS_UNIT := \
$(srcroot)test/unit/arena_reset.c \
$(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \


@@ -1558,6 +1558,23 @@ malloc_conf = "xmalloc:true";]]></programlisting>
details.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.reset">
<term>
<mallctl>arena.&lt;i&gt;.reset</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Discard all of the arena's extant allocations. This
interface can only be used with arenas created via <link
linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>. None
of the arena's discarded/cached allocations may be accessed afterward. As
part of this requirement, all thread caches which were used to
allocate/deallocate in conjunction with the arena must be flushed
beforehand. This interface cannot be used if running inside Valgrind,
nor if the <link linkend="opt.quarantine">quarantine</link> size is
non-zero.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.dss">
<term>
<mallctl>arena.&lt;i&gt;.dss</mallctl>

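To illustrate the thread-cache requirement described in the documentation above (a sketch, not part of the patch): a thread that allocated from the arena through its tcache would flush that cache via the existing thread.tcache.flush mallctl before issuing the reset. The helper name below is hypothetical.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical helper: flush the calling thread's cache, then reset the
 * arena.  Assumes arena_ind was obtained from "arenas.extend" and that no
 * other thread touches the arena concurrently.  Per the documentation and
 * the ctl handler in this commit, the reset fails with EFAULT under
 * Valgrind or when opt.quarantine is non-zero.
 */
static int
flush_and_reset(unsigned arena_ind)
{
	char cmd[64];
	int err;

	/* Drop any cached objects so none reference the arena afterward. */
	if ((err = mallctl("thread.tcache.flush", NULL, NULL, NULL, 0)) != 0)
		return (err);

	/* Discard every extant allocation owned by the arena. */
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", arena_ind);
	return (mallctl(cmd, NULL, NULL, NULL, 0));
}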

@@ -330,6 +330,10 @@ struct arena_s {
dss_prec_t dss_prec;
/* Extant arena chunks. */
ql_head(extent_node_t) achunks;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -533,6 +537,7 @@ ssize_t arena_decay_time_get(tsd_t *tsd, arena_t *arena);
bool arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time);
void arena_purge(tsd_t *tsd, arena_t *arena, bool all);
void arena_maybe_purge(tsd_t *tsd, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,


@@ -48,7 +48,7 @@ struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
/* Linkage for arena's huge and node_cache lists. */
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};


@@ -100,6 +100,7 @@ arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc


@@ -738,14 +738,61 @@ arena_chunk_alloc(tsd_t *tsd, arena_t *arena)
return (NULL);
}
ql_elm_new(&chunk->node, ql_link);
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted even if
* header decommit fails, since treating a partially committed
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
chunk_hooks = chunk_hooks_get(tsd, arena);
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)chunk, chunksize,
committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
}
static void
arena_spare_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *spare)
{
assert(arena->spare != spare);
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
arena_chunk_discard(tsd, arena, spare);
}
static void
arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -761,43 +808,11 @@ arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool committed;
arena->spare = chunk;
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
chunk_deregister(spare, &spare->node);
committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted
* even if header decommit fails, since treating a
* partially committed chunk as committed has a high
* potential for causing later access of decommitted
* memory.
*/
chunk_hooks = chunk_hooks_get(tsd, arena);
chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)spare,
chunksize, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
} else
arena->spare = chunk;
ql_remove(&arena->achunks, &chunk->node, ql_link);
spare = arena->spare;
arena->spare = chunk;
if (spare != NULL)
arena_spare_discard(tsd, arena, spare);
}
static void
@@ -1802,6 +1817,140 @@ arena_purge(tsd_t *tsd, arena_t *arena, bool all)
malloc_mutex_unlock(tsd, &arena->lock);
}
static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
size_t pageind, npages;
cassert(config_prof);
assert(opt_prof);
/*
* Iterate over the allocated runs and remove profiled allocations from
* the sample set.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
if (arena_mapbits_large_get(chunk, pageind) != 0) {
void *ptr = (void *)((uintptr_t)chunk + (pageind
<< LG_PAGE));
size_t usize = isalloc(tsd, ptr, config_prof);
prof_free(tsd, ptr, usize);
npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
} else {
/* Skip small run. */
size_t binind = arena_mapbits_binind_get(chunk,
pageind);
arena_bin_info_t *bin_info =
&arena_bin_info[binind];
npages = bin_info->run_size >> LG_PAGE;
}
} else {
/* Skip unallocated run. */
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
assert(pageind + npages <= chunk_npages);
}
}
void
arena_reset(tsd_t *tsd, arena_t *arena)
{
unsigned i;
extent_node_t *node;
/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
* still reasons that some locking is necessary:
*
* - Some of the functions in the transitive closure of calls assume
* appropriate locks are held, and in some cases these locks are
* temporarily dropped to avoid lock order reversal or deadlock due to
* reentry.
* - mallctl("epoch", ...) may concurrently refresh stats. While
* strictly speaking this is a "concurrent operation", disallowing
* stats refreshes would impose an inconvenient burden.
*/
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(node, &arena->achunks, ql_link) {
arena_achunk_prof_reset(tsd, arena,
extent_node_addr_get(node));
}
}
/* Huge allocations. */
malloc_mutex_lock(tsd, &arena->huge_mtx);
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_node_addr_get(node);
malloc_mutex_unlock(tsd, &arena->huge_mtx);
/* Remove huge allocation from prof sample set. */
if (config_prof && opt_prof) {
size_t usize;
usize = isalloc(tsd, ptr, config_prof);
prof_free(tsd, ptr, usize);
}
huge_dalloc(tsd, ptr);
malloc_mutex_lock(tsd, &arena->huge_mtx);
}
malloc_mutex_unlock(tsd, &arena->huge_mtx);
malloc_mutex_lock(tsd, &arena->lock);
/* Bins. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd, &bin->lock);
bin->runcur = NULL;
arena_run_heap_new(&bin->runs);
if (config_stats) {
bin->stats.curregs = 0;
bin->stats.curruns = 0;
}
malloc_mutex_unlock(tsd, &bin->lock);
}
/*
* Re-initialize runs_dirty such that the chunks_cache and runs_dirty
* chains directly correspond.
*/
qr_new(&arena->runs_dirty, rd_link);
for (node = qr_next(&arena->chunks_cache, cc_link);
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
qr_new(&node->rd, rd_link);
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
}
/* Arena chunks. */
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, node, ql_link);
arena_chunk_discard(tsd, arena, extent_node_addr_get(node));
}
/* Spare. */
if (arena->spare != NULL) {
arena_chunk_discard(tsd, arena, arena->spare);
arena->spare = NULL;
}
assert(!arena->purging);
arena->nactive = 0;
for (i = 0; i < runs_avail_nclasses; i++)
arena_run_heap_new(&arena->runs_avail[i]);
malloc_mutex_unlock(tsd, &arena->lock);
}
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
@@ -3373,6 +3522,8 @@ arena_new(tsd_t *tsd, unsigned ind)
arena->dss_prec = chunk_dss_prec_get(tsd);
ql_new(&arena->achunks);
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();


@@ -120,6 +120,7 @@ CTL_PROTO(tcache_destroy)
static void arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
@@ -299,6 +300,7 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
@@ -1602,6 +1604,40 @@ label_return:
return (ret);
}
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind;
arena_t *arena;
READONLY();
WRITEONLY();
if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
unlikely(opt_quarantine))) {
ret = EFAULT;
goto label_return;
}
arena_ind = (unsigned)mib[1];
if (config_debug) {
malloc_mutex_lock(tsd, &ctl_mtx);
assert(arena_ind < ctl_stats.narenas);
malloc_mutex_unlock(tsd, &ctl_mtx);
}
assert(arena_ind >= opt_narenas);
arena = arena_get(tsd, arena_ind, false);
arena_reset(tsd, arena);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)

test/unit/arena_reset.c (new file, 160 lines added)

@@ -0,0 +1,160 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,lg_prof_sample:0";
#endif
static unsigned
get_nsizes_impl(const char *cmd)
{
unsigned ret;
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
}
static unsigned
get_nsmall(void)
{
return (get_nsizes_impl("arenas.nbins"));
}
static unsigned
get_nlarge(void)
{
return (get_nsizes_impl("arenas.nlruns"));
}
static unsigned
get_nhuge(void)
{
return (get_nsizes_impl("arenas.nhchunks"));
}
static size_t
get_size_impl(const char *cmd, size_t ind)
{
size_t ret;
size_t z;
size_t mib[4];
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
}
static size_t
get_small_size(size_t ind)
{
return (get_size_impl("arenas.bin.0.size", ind));
}
static size_t
get_large_size(size_t ind)
{
return (get_size_impl("arenas.lrun.0.size", ind));
}
static size_t
get_huge_size(size_t ind)
{
return (get_size_impl("arenas.hchunk.0.size", ind));
}
TEST_BEGIN(test_arena_reset)
{
#define NHUGE 4
unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
size_t sz, miblen;
void **ptrs;
size_t mib[3];
tsd_t *tsd;
test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
&& unlikely(opt_quarantine)));
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
nsmall = get_nsmall();
nlarge = get_nlarge();
nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
nptrs = nsmall + nlarge + nhuge;
ptrs = (void **)malloc(nptrs * sizeof(void *));
assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
/* Allocate objects with a wide range of sizes. */
for (i = 0; i < nsmall; i++) {
sz = get_small_size(i);
ptrs[i] = mallocx(sz, MALLOCX_ARENA(arena_ind));
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, MALLOCX_ARENA(%u)) failure", sz,
arena_ind);
}
for (i = 0; i < nlarge; i++) {
sz = get_large_size(i);
ptrs[nsmall + i] = mallocx(sz, MALLOCX_ARENA(arena_ind));
assert_ptr_not_null(ptrs[nsmall + i],
"Unexpected mallocx(%zu, MALLOCX_ARENA(%u)) failure", sz,
arena_ind);
}
for (i = 0; i < nhuge; i++) {
sz = get_huge_size(i);
ptrs[nsmall + nlarge + i] = mallocx(sz,
MALLOCX_ARENA(arena_ind));
assert_ptr_not_null(ptrs[nsmall + nlarge + i],
"Unexpected mallocx(%zu, MALLOCX_ARENA(%u)) failure", sz,
arena_ind);
}
tsd = tsd_fetch();
/* Verify allocations. */
for (i = 0; i < nptrs; i++) {
assert_zu_gt(ivsalloc(tsd, ptrs[i], false), 0,
"Allocation should have queryable size");
}
/* Reset. */
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
assert_zu_eq(ivsalloc(tsd, ptrs[i], false), 0,
"Allocation should no longer exist");
}
free(ptrs);
}
TEST_END
int
main(void)
{
return (test(
test_arena_reset));
}