Implement arena.<i>.destroy.

Add MALLCTL_ARENAS_DESTROYED for accessing destroyed arena stats as an
analogue to MALLCTL_ARENAS_ALL.

This resolves #382.
Jason Evans 2017-01-03 17:21:59 -08:00
parent 3f291d59ad
commit edf1bafb2b
16 changed files with 616 additions and 136 deletions


@ -195,6 +195,10 @@ TESTS_UNIT := \
$(srcroot)test/unit/util.c \
$(srcroot)test/unit/witness.c \
$(srcroot)test/unit/zero.c
ifeq (@enable_prof@, 1)
TESTS_UNIT += \
$(srcroot)test/unit/arena_reset_prof.c
endif
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
$(srcroot)test/integration/extent.c \


@ -426,13 +426,14 @@ for (i = 0; i < nbins; i++) {
<function>mallctl*()</function> functions internally, so inconsistent
statistics can be reported if multiple threads use these functions
simultaneously.  If <option>--enable-stats</option> is specified during
configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
can be specified to omit merged arena, destroyed merged arena, and per
arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
be specified to omit per size class statistics for bins and large objects,
respectively.  Unrecognized characters are silently ignored.  Note that
thread caching may prevent some statistics from being completely up to
date, since extra locking would be required to merge counters that track
thread cache operations.</para>
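For example (illustrative only, not part of this change), an application built against a stats-enabled jemalloc could keep the merged totals while skipping both destroyed-arena and per-arena detail:

/* "d" omits destroyed merged arena stats; "a" omits per arena stats. */
malloc_stats_print(NULL, NULL, "da");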
<para>The <function>malloc_usable_size()</function> function
returns the usable size of the allocation pointed to by
@ -687,18 +688,21 @@ for (i = 0; i < nbins; i++) {
<refsect1 id="mallctl_namespace">
<title>MALLCTL NAMESPACE</title>
<para>The following names are defined in the namespace accessible via the
<function>mallctl*()</function> functions.  Value types are specified in
parentheses, their readable/writable statuses are encoded as
<literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
<literal>--</literal>, and required build configuration flags follow, if
any.  A name element encoded as <literal>&lt;i&gt;</literal> or
<literal>&lt;j&gt;</literal> indicates an integer component, where the
integer varies from 0 to some upper value that must be determined via
introspection.  In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
<literal>&lt;i&gt;</literal> equal to
<constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
or access the summation of statistics from all arenas; similarly
<literal>&lt;i&gt;</literal> equal to
<constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
summation of statistics from all destroyed arenas.  These constants can be
utilized either via <function>mallctlnametomib()</function> followed by
<function>mallctlbymib()</function>, or via code such as the following:
<programlisting language="C"><![CDATA[
@ -707,9 +711,9 @@ for (i = 0; i < nbins; i++) {
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
    NULL, NULL, NULL, 0);]]></programlisting>
Take special note of the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
refreshing of cached dynamic statistics.</para>
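As an illustration (not from this diff), dynamic statistics are only refreshed when the epoch is advanced, so a reader would typically do something like the following before querying any stats.* values:

uint64_t epoch = 1;
size_t sz = sizeof(epoch);
/* Advance the epoch to refresh cached statistics. */
mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch, sz);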
<variablelist>
<varlistentry id="version">
@ -1478,6 +1482,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
beforehand.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.destroy">
<term>
<mallctl>arena.&lt;i&gt;.destroy</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Destroy the arena. Discard all of the arena's extant
allocations using the same mechanism as for <link
linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
(with all the same constraints and side effects), merge the arena stats
into those accessible at arena index
<constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
discard all metadata associated with the arena. Future calls to <link
linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
recycle the arena index. Destruction will fail if any threads are
currently associated with the arena as a result of calls to <link
linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
</varlistentry>
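A usage sketch (illustrative only; error handling omitted) for an arena created via arenas.create and later torn down with arena.<i>.destroy:

unsigned arena_ind;
size_t sz = sizeof(arena_ind);
size_t mib[3];
size_t miblen = sizeof(mib) / sizeof(size_t);

/* Create an arena and allocate from it via MALLOCX_ARENA(arena_ind). */
mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0);
void *p = mallocx(42, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);

/* Discard all of the arena's extant allocations and its metadata. */
mallctlnametomib("arena.0.destroy", mib, &miblen);
mib[1] = (size_t)arena_ind;
mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);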
<varlistentry id="arena.i.dss">
<term>
<mallctl>arena.&lt;i&gt;.dss</mallctl>


@ -290,6 +290,7 @@ bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,


@ -32,7 +32,10 @@ struct ctl_indexed_node_s {
};
struct ctl_arena_stats_s {
unsigned arena_ind;
bool initialized;
ql_elm(ctl_arena_stats_t) destroyed_link;
unsigned nthreads;
const char *dss;
ssize_t decay_time;
@ -62,7 +65,14 @@ struct ctl_stats_s {
size_t mapped;
size_t retained;
unsigned narenas;
ql_head(ctl_arena_stats_t) destroyed;
/*
* Element 0 contains merged stats for extant arenas (accessed via
* MALLCTL_ARENAS_ALL), element 1 contains merged stats for destroyed
* arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the remaining
* MALLOCX_ARENA_MAX+1 elements correspond to arenas.
*/
ctl_arena_stats_t *arenas[MALLOCX_ARENA_MAX + 3];
};
#endif /* JEMALLOC_H_STRUCTS */


@ -125,6 +125,8 @@ extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,


@ -215,6 +215,7 @@ typedef unsigned szind_t;
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_MAX ((1 << MALLOCX_ARENA_BITS) - 2)
#define MALLOCX_TCACHE_MASK \
    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
@ -470,6 +471,7 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);


@ -21,6 +21,7 @@ arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_destroy
arena_dss_prec_get
arena_dss_prec_set
arena_extent_alloc_large
@ -67,6 +68,7 @@ arena_ralloc_no_move
arena_reset
arena_salloc
arena_sdalloc
arena_set
arena_slab_regind
arena_stats_merge
arena_tcache_fill_small
@ -164,6 +166,7 @@ extent_dalloc_cache
extent_dalloc_gap
extent_dalloc_mmap
extent_dalloc_wrapper
extent_dalloc_wrapper_try
extent_decommit_wrapper
extent_dss_boot
extent_dss_mergeable


@ -44,6 +44,11 @@
 * 0);
 */
#define MALLCTL_ARENAS_ALL 4096
/*
* Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
* destroyed arenas.
*/
#define MALLCTL_ARENAS_DESTROYED 4097
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
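A hedged example of reading the merged statistics of destroyed arenas (assumes a build configured with --enable-stats and the STRINGIFY helper shown in the comment for MALLCTL_ARENAS_ALL above):

size_t allocated, sz = sizeof(allocated);
uint64_t epoch = 1;

/* Refresh cached stats, then read small-object bytes of destroyed arenas. */
mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch));
mallctl("stats.arenas." STRINGIFY(MALLCTL_ARENAS_DESTROYED) ".small.allocated",
    (void *)&allocated, &sz, NULL, 0);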


@ -903,6 +903,72 @@ arena_reset(tsd_t *tsd, arena_t *arena)
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena)
{
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t i;
/*
 * Iterate over the retained extents and blindly attempt to deallocate
 * them.  This gives the extent allocator underlying the extent hooks an
 * opportunity to unmap all retained memory without having to keep its
 * own metadata structures, but if deallocation fails, that is the
 * application's decision/problem.  In practice, retained extents are
 * leaked here if !config_munmap unless the application provided custom
 * extent hooks, so best practice is to either enable munmap (and avoid
 * dss for arenas to be destroyed), or provide custom extent hooks that
 * either unmap retained extents or track them for later use.
 */
for (i = 0; i < sizeof(arena->extents_retained)/sizeof(extent_heap_t);
i++) {
extent_heap_t *extents = &arena->extents_retained[i];
extent_t *extent;
while ((extent = extent_heap_remove_first(extents)) != NULL) {
extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks,
extent);
}
}
}
void
arena_destroy(tsd_t *tsd, arena_t *arena)
{
assert(base_ind_get(arena->base) >= narenas_auto);
assert(arena_nthreads_get(arena, false) == 0);
assert(arena_nthreads_get(arena, true) == 0);
/*
* No allocations have occurred since arena_reset() was called.
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
* extents, so only retained extents may remain.
*/
assert(arena->ndirty == 0);
/* Attempt to deallocate retained memory. */
arena_destroy_retained(tsd_tsdn(tsd), arena);
/*
* Remove the arena pointer from the arenas array. We rely on the fact
* that there is no way for the application to get a dirty read from the
* arenas array unless there is an inherent race in the application
* involving access of an arena being concurrently destroyed. The
* application must synchronize knowledge of the arena's validity, so as
* long as we use an atomic write to update the arenas array, the
* application will get a clean read any time after it synchronizes
* knowledge that the arena is no longer valid.
*/
arena_set(base_ind_get(arena->base), NULL);
/*
* Destroy the base allocator, which manages all metadata ever mapped by
* this arena.
*/
base_delete(arena->base);
}
static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info)

src/ctl.c

@ -48,18 +48,6 @@ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
    const size_t *mib, size_t miblen, size_t i);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
static bool ctl_grow(tsdn_t *tsdn, extent_hooks_t *extent_hooks);
static void ctl_refresh(tsdn_t *tsdn);
static bool ctl_init(tsdn_t *tsdn);
static int ctl_lookup(tsdn_t *tsdn, const char *name,
ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
@ -113,6 +101,7 @@ CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_extent_hooks)
@ -274,6 +263,7 @@ static const ctl_named_node_t arena_i_node[] = {
    {NAME("purge"), CTL(arena_i_purge)},
    {NAME("decay"), CTL(arena_i_decay)},
    {NAME("reset"), CTL(arena_i_reset)},
    {NAME("destroy"), CTL(arena_i_destroy)},
    {NAME("dss"), CTL(arena_i_dss)},
    {NAME("decay_time"), CTL(arena_i_decay_time)},
    {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}
@ -452,6 +442,9 @@ stats_arenas_i2a_impl(size_t i, bool compat, bool validate)
    case MALLCTL_ARENAS_ALL:
        a = 0;
        break;
    case MALLCTL_ARENAS_DESTROYED:
        a = 1;
        break;
    default:
        if (compat && i == ctl_stats->narenas) {
            /*
@ -471,7 +464,7 @@ stats_arenas_i2a_impl(size_t i, bool compat, bool validate)
             */
            assert(i < ctl_stats->narenas || (!validate && i ==
                ctl_stats->narenas));
            a = (unsigned)i + 2;
        }
        break;
    }
@ -479,6 +472,13 @@ stats_arenas_i2a_impl(size_t i, bool compat, bool validate)
    return (a);
}

static unsigned
stats_arenas_i2a(size_t i)
{
    return (stats_arenas_i2a_impl(i, true, false));
}

static ctl_arena_stats_t *
stats_arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
{
@ -492,10 +492,13 @@ stats_arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
            sizeof(ctl_arena_stats_t), QUANTUM);
        if (ret == NULL)
            return (NULL);

        ret->arena_ind = (unsigned)i;
        ctl_stats->arenas[stats_arenas_i2a_impl(i, compat, false)] =
            ret;
    }

    assert(ret == NULL || stats_arenas_i2a(ret->arena_ind) ==
        stats_arenas_i2a(i));
    return (ret);
}
@ -553,92 +556,130 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
}

static void
ctl_arena_stats_sdmerge(ctl_arena_stats_t *sdstats, ctl_arena_stats_t *astats,
    bool destroyed)
{
    unsigned i;

    if (!destroyed) {
        sdstats->nthreads += astats->nthreads;
        sdstats->pactive += astats->pactive;
        sdstats->pdirty += astats->pdirty;
    } else {
        assert(astats->nthreads == 0);
        assert(astats->pactive == 0);
        assert(astats->pdirty == 0);
    }

    if (config_stats) {
        if (!destroyed) {
            sdstats->astats.mapped += astats->astats.mapped;
            sdstats->astats.retained += astats->astats.retained;
        }
        sdstats->astats.npurge += astats->astats.npurge;
        sdstats->astats.nmadvise += astats->astats.nmadvise;
        sdstats->astats.purged += astats->astats.purged;

        if (!destroyed) {
            sdstats->astats.base += astats->astats.base;
            sdstats->astats.internal += astats->astats.internal;
            sdstats->astats.resident += astats->astats.resident;
        } else
            assert(astats->astats.internal == 0);

        if (!destroyed)
            sdstats->allocated_small += astats->allocated_small;
        else
            assert(astats->allocated_small == 0);
        sdstats->nmalloc_small += astats->nmalloc_small;
        sdstats->ndalloc_small += astats->ndalloc_small;
        sdstats->nrequests_small += astats->nrequests_small;

        if (!destroyed) {
            sdstats->astats.allocated_large +=
                astats->astats.allocated_large;
        } else
            assert(astats->astats.allocated_large == 0);
        sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
        sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
        sdstats->astats.nrequests_large +=
            astats->astats.nrequests_large;

        for (i = 0; i < NBINS; i++) {
            sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
            sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
            sdstats->bstats[i].nrequests +=
                astats->bstats[i].nrequests;
            if (!destroyed) {
                sdstats->bstats[i].curregs +=
                    astats->bstats[i].curregs;
            } else
                assert(astats->bstats[i].curregs == 0);
            if (config_tcache) {
                sdstats->bstats[i].nfills +=
                    astats->bstats[i].nfills;
                sdstats->bstats[i].nflushes +=
                    astats->bstats[i].nflushes;
            }
            sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
            sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
            if (!destroyed) {
                sdstats->bstats[i].curslabs +=
                    astats->bstats[i].curslabs;
            } else
                assert(astats->bstats[i].curslabs == 0);
        }

        for (i = 0; i < NSIZES - NBINS; i++) {
            sdstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
            sdstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
            sdstats->lstats[i].nrequests +=
                astats->lstats[i].nrequests;
            if (!destroyed) {
                sdstats->lstats[i].curlextents +=
                    astats->lstats[i].curlextents;
            } else
                assert(astats->lstats[i].curlextents == 0);
        }
    }
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_stats_t *sdstats,
    unsigned i, bool destroyed)
{
    ctl_arena_stats_t *astats = stats_arenas_i(i);

    ctl_arena_clear(astats);
    ctl_arena_stats_amerge(tsdn, astats, arena);
    /* Merge into sum stats as well. */
    ctl_arena_stats_sdmerge(sdstats, astats, destroyed);
}
static unsigned
ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
{
    unsigned arena_ind;
    ctl_arena_stats_t *astats;

    if ((astats = ql_last(&ctl_stats->destroyed, destroyed_link)) != NULL) {
        ql_remove(&ctl_stats->destroyed, astats, destroyed_link);
        arena_ind = astats->arena_ind;
    } else
        arena_ind = ctl_stats->narenas;

    /* Trigger stats allocation. */
    if (stats_arenas_i_impl(tsdn, arena_ind, false, true) == NULL)
        return (UINT_MAX);

    /* Initialize new arena. */
    if (arena_init(tsdn, arena_ind, extent_hooks) == NULL)
        return (UINT_MAX);

    if (arena_ind == ctl_stats->narenas)
        ctl_stats->narenas++;

    return (arena_ind);
}
static void
@ -663,7 +704,7 @@ ctl_refresh(tsdn_t *tsdn)
        astats->initialized = initialized;

        if (initialized)
            ctl_arena_refresh(tsdn, tarenas[i], sstats, i, false);
    }

    if (config_stats) {
@ -687,7 +728,7 @@ ctl_init(tsdn_t *tsdn)
    malloc_mutex_lock(tsdn, &ctl_mtx);
    if (!ctl_initialized) {
        ctl_arena_stats_t *sstats, *dstats;
        unsigned i;

        /*
@ -715,6 +756,19 @@ ctl_init(tsdn_t *tsdn)
        }
        sstats->initialized = true;

        if ((dstats = stats_arenas_i_impl(tsdn,
            MALLCTL_ARENAS_DESTROYED, false, true)) == NULL) {
            ret = true;
            goto label_return;
        }
        ctl_arena_clear(dstats);
        /*
         * Don't toggle stats for MALLCTL_ARENAS_DESTROYED to
         * initialized until an arena is actually destroyed, so that
         * arena.<i>.initialized can be used to query whether the stats
         * are relevant.
         */

        ctl_stats->narenas = narenas_total_get();
        for (i = 0; i < ctl_stats->narenas; i++) {
            if (stats_arenas_i_impl(tsdn, i, false, true) == NULL) {
@ -723,7 +777,7 @@ ctl_init(tsdn_t *tsdn)
            }
        }

        ql_new(&ctl_stats->destroyed);
        ctl_refresh(tsdn);
        ctl_initialized = true;
    }
@ -1562,6 +1616,33 @@ label_return:
    return (ret);
}
static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
arena_t **arena)
{
int ret;
READONLY();
WRITEONLY();
MIB_UNSIGNED(*arena_ind, 1);
if (*arena_ind < narenas_auto) {
ret = EFAULT;
goto label_return;
}
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
if (*arena == NULL) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
return (ret);
}
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
@ -1570,26 +1651,51 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    unsigned arena_ind;
    arena_t *arena;

    ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
        newp, newlen, &arena_ind, &arena);
    if (ret != 0)
        return (ret);

    arena_reset(tsd, arena);

    return (ret);
}

static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    unsigned arena_ind;
    arena_t *arena;
    ctl_arena_stats_t *dstats, *astats;

    ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
        newp, newlen, &arena_ind, &arena);
    if (ret != 0)
        goto label_return;

    if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
        true) != 0) {
        ret = EFAULT;
        goto label_return;
    }

    /* Merge stats after resetting and purging arena. */
    arena_reset(tsd, arena);
    arena_purge(tsd_tsdn(tsd), arena, true);
    dstats = stats_arenas_i(MALLCTL_ARENAS_DESTROYED);
    dstats->initialized = true;
    ctl_arena_refresh(tsd_tsdn(tsd), arena, dstats, arena_ind, true);
    /* Destroy arena. */
    arena_destroy(tsd, arena);
    astats = stats_arenas_i(arena_ind);
    astats->initialized = false;

    /* Record arena index for later recycling via arenas.create. */
    ql_elm_new(astats, destroyed_link);
    ql_tail_insert(&ctl_stats->destroyed, astats, destroyed_link);

    assert(ret == 0);
label_return:
    return (ret);
}
@ -1733,9 +1839,16 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
    const ctl_named_node_t *ret;

    malloc_mutex_lock(tsdn, &ctl_mtx);
    switch (i) {
    case MALLCTL_ARENAS_ALL:
    case MALLCTL_ARENAS_DESTROYED:
        break;
    default:
        if (i > ctl_stats->narenas) {
            ret = NULL;
            goto label_return;
        }
        break;
    }

    ret = super_arena_i_node;
@ -1828,18 +1941,18 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
{
    int ret;
    extent_hooks_t *extent_hooks;
    unsigned arena_ind;

    malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

    extent_hooks = (extent_hooks_t *)&extent_hooks_default;
    WRITE(extent_hooks, extent_hooks_t *);
    if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
        UINT_MAX) {
        ret = EAGAIN;
        goto label_return;
    }
    READ(arena_ind, unsigned);

    ret = 0;
label_return:


@ -1039,11 +1039,11 @@ extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    return (extent_dalloc_default_impl(addr, size));
}

bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
    bool err;

    assert(extent_base_get(extent) != NULL);
    assert(extent_size_get(extent) != 0);
@ -1067,10 +1067,21 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
        extent_committed_get(extent), arena_ind_get(arena)));
    }

    if (!err)
        extent_dalloc(tsdn, arena, extent);

    return (err);
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
    bool zeroed;

    if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent))
        return;

    extent_reregister(tsdn, extent);
    /* Try to decommit; purge if that fails. */
    if (!extent_committed_get(extent))


@ -375,7 +375,7 @@ bootstrap_free(void *ptr)
    a0idalloc(iealloc(NULL, ptr), ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena)
{


@ -772,7 +772,8 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json, bool merged, bool destroyed, bool unmerged, bool bins,
    bool large)
{
    size_t allocated, active, metadata, resident, mapped, retained;
@ -808,7 +809,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
            allocated, active, metadata, resident, mapped, retained);
    }

    if (merged || destroyed || unmerged) {
        unsigned narenas;

        if (json) {
@ -822,6 +823,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
            size_t miblen = sizeof(mib) / sizeof(size_t);
            size_t sz;
            VARIABLE_ARRAY(bool, initialized, narenas);
            bool destroyed_initialized;
            unsigned i, j, ninitialized;

            xmallctlnametomib("arena.0.initialized", mib, &miblen);
@ -833,6 +835,10 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
                if (initialized[i])
                    ninitialized++;
            }
            mib[1] = MALLCTL_ARENAS_DESTROYED;
            sz = sizeof(bool);
            xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
                NULL, 0);

            /* Merged stats. */
            if (merged && (ninitialized > 1 || !unmerged)) {
@ -853,6 +859,25 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
                }
            }

            /* Destroyed stats. */
            if (destroyed_initialized && destroyed) {
                /* Print destroyed arena stats. */
                if (json) {
                    malloc_cprintf(write_cb, cbopaque,
                        "\t\t\t\"destroyed\": {\n");
                } else {
                    malloc_cprintf(write_cb, cbopaque,
                        "\nDestroyed arenas stats:\n");
                }
                stats_arena_print(write_cb, cbopaque, json,
                    MALLCTL_ARENAS_DESTROYED, bins, large);
                if (json) {
                    malloc_cprintf(write_cb, cbopaque,
                        "\t\t\t}%s\n", (ninitialized > 1) ?
                        "," : "");
                }
            }

            /* Unmerged stats. */
            for (i = j = 0; i < narenas; i++) {
                if (initialized[i]) {
@ -895,6 +920,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json = false;
    bool general = true;
    bool merged = config_stats;
    bool destroyed = config_stats;
    bool unmerged = config_stats;
    bool bins = true;
    bool large = true;
@ -935,6 +961,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
            case 'm':
                merged = false;
                break;
            case 'd':
                destroyed = false;
                break;
            case 'a':
                unmerged = false;
                break;
@ -963,8 +992,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
        stats_general_print(write_cb, cbopaque, json, more);
    }
    if (config_stats) {
        stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
            unmerged, bins, large);
    }

    if (json) {


@ -1,9 +1,9 @@
#ifndef ARENA_RESET_PROF_C_
#include "test/jemalloc_test.h"
#endif

#include "test/extent_hooks.h"
static unsigned
get_nsizes_impl(const char *cmd)
{
@ -79,57 +79,64 @@ vsalloc(tsdn_t *tsdn, const void *ptr)
    return (isalloc(tsdn, extent, ptr));
}
static unsigned
do_arena_create(extent_hooks_t *h)
{
    unsigned arena_ind;
    size_t sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
        (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
        "Unexpected mallctl() failure");
    return (arena_ind);
}

static void
do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs)
{
#define NLARGE 32
    unsigned nsmall, nlarge, i;
    size_t sz;
    int flags;
    tsdn_t *tsdn;

    flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

    nsmall = get_nsmall();
    nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
    *nptrs = nsmall + nlarge;
    *ptrs = (void **)malloc(*nptrs * sizeof(void *));
    assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");

    /* Allocate objects with a wide range of sizes. */
    for (i = 0; i < nsmall; i++) {
        sz = get_small_size(i);
        (*ptrs)[i] = mallocx(sz, flags);
        assert_ptr_not_null((*ptrs)[i],
            "Unexpected mallocx(%zu, %#x) failure", sz, flags);
    }
    for (i = 0; i < nlarge; i++) {
        sz = get_large_size(i);
        (*ptrs)[nsmall + i] = mallocx(sz, flags);
        assert_ptr_not_null((*ptrs)[i],
            "Unexpected mallocx(%zu, %#x) failure", sz, flags);
    }

    tsdn = tsdn_fetch();

    /* Verify allocations. */
    for (i = 0; i < *nptrs; i++) {
        assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
            "Allocation should have queryable size");
    }
}

static void
do_arena_reset_post(void **ptrs, unsigned nptrs)
{
    tsdn_t *tsdn;
    unsigned i;

    tsdn = tsdn_fetch();

    /* Verify allocations no longer exist. */
    for (i = 0; i < nptrs; i++) {
@ -139,6 +146,193 @@ TEST_BEGIN(test_arena_reset)
    free(ptrs);
}
static void
do_arena_reset_destroy(const char *name, unsigned arena_ind)
{
size_t mib[3];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib(name, mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
static void
do_arena_reset(unsigned arena_ind)
{
do_arena_reset_destroy("arena.0.reset", arena_ind);
}
static void
do_arena_destroy(unsigned arena_ind)
{
do_arena_reset_destroy("arena.0.destroy", arena_ind);
}
TEST_BEGIN(test_arena_reset)
{
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
do_arena_reset(arena_ind);
do_arena_reset_post(ptrs, nptrs);
}
TEST_END
static bool
arena_i_initialized(unsigned arena_ind, bool refresh)
{
bool initialized;
size_t mib[3];
size_t miblen, sz;
if (refresh) {
uint64_t epoch = 1;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)), 0, "Unexpected mallctl() failure");
}
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
sz = sizeof(initialized);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
0), 0, "Unexpected mallctlbymib() failure");
return (initialized);
}
TEST_BEGIN(test_arena_destroy_initial)
{
assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should not be initialized");
}
TEST_END
TEST_BEGIN(test_arena_destroy_hooks_default)
{
unsigned arena_ind, arena_ind_another, arena_ind_prev;
void **ptrs;
unsigned nptrs;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
/*
* Create another arena before destroying one, to better verify arena
* index reuse.
*/
arena_ind_another = do_arena_create(NULL);
do_arena_destroy(arena_ind);
assert_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs);
arena_ind_prev = arena_ind;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_u_eq(arena_ind, arena_ind_prev,
"Arena index should have been recycled");
do_arena_destroy(arena_ind);
do_arena_reset_post(ptrs, nptrs);
do_arena_destroy(arena_ind_another);
}
TEST_END
/*
* Actually unmap extents, regardless of config_munmap, so that attempts to
* access a destroyed arena's memory will segfault.
*/
static bool
extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind)
{
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
assert_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc)
return (true);
pages_unmap(addr, size);
did_dalloc = true;
return (false);
}
static extent_hooks_t hooks_orig;
static extent_hooks_t hooks_unmap = {
extent_alloc_hook,
extent_dalloc_unmap, /* dalloc */
extent_commit_hook,
extent_decommit_hook,
extent_purge_lazy_hook,
extent_purge_forced_hook,
extent_split_hook,
extent_merge_hook
};
TEST_BEGIN(test_arena_destroy_hooks_unmap)
{
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
extent_hooks_prep();
try_decommit = false;
memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
did_alloc = false;
arena_ind = do_arena_create(&hooks);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
assert_true(did_alloc, "Expected alloc");
assert_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
did_dalloc = false;
do_arena_destroy(arena_ind);
assert_true(did_dalloc, "Expected dalloc");
assert_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs);
memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
}
TEST_END

int
@ -146,5 +340,8 @@ main(void)
{
    return (test(
        test_arena_reset,
        test_arena_destroy_initial,
        test_arena_destroy_hooks_default,
        test_arena_destroy_hooks_unmap));
}


@ -0,0 +1,5 @@
#include "test/jemalloc_test.h"
#define ARENA_RESET_PROF_C_
const char *malloc_conf = "prof:true,lg_prof_sample:0";
#include "arena_reset.c"


@ -381,6 +381,15 @@ TEST_BEGIN(test_arena_i_initialized)
    "Unexpected mallctl() failure");
assert_true(initialized,
    "Merged arena statistics should always be initialized");
/* Equivalent to the above but using mallctl() directly. */
sz = sizeof(initialized);
assert_d_eq(mallctl(
"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
(void *)&initialized, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_true(initialized,
"Merged arena statistics should always be initialized");
}
TEST_END