Implement two-phase decay-based purging.

Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether.  Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
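
A minimal sketch (assuming a non-prefixed build) of tuning both decay phases
at startup through the malloc_conf symbol; as in the opt.*_decay_time options
below, -1 disables the corresponding phase:

/*
 * jemalloc reads this symbol during initialization.  The values are
 * arbitrary: dirty pages decay to muzzy over ~30 s, and muzzy pages decay
 * to clean over ~10 s.
 */
const char *malloc_conf = "dirty_decay_time:30,muzzy_decay_time:10";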

The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
  stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
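
A minimal sketch of driving one of the renamed controls through mallctl(),
assuming arena 0 exists and using an arbitrary new value of 5 seconds:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	ssize_t old_time, new_time = 5;
	size_t sz = sizeof(old_time);

	/* Formerly "arena.0.decay_time"; now split into dirty/muzzy variants. */
	if (mallctl("arena.0.dirty_decay_time", &old_time, &sz, &new_time,
	    sizeof(new_time)) != 0) {
		return 1;
	}
	printf("arena.0.dirty_decay_time: %zd -> %zd\n", old_time, new_time);
	return 0;
}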

This resolves #521.
Jason Evans 2017-03-08 22:42:57 -08:00
parent 38a5bfc816
commit 64e458f5cd
23 changed files with 1078 additions and 490 deletions

@ -442,8 +442,8 @@ ifeq ($(enable_prof), 1)
$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
endif
check_integration_decay: tests_integration check_integration_dir
$(MALLOC_CONF)="decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
$(MALLOC_CONF)="decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
$(MALLOC_CONF)="dirty_decay_time:-1,muzzy_decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
$(MALLOC_CONF)="dirty_decay_time:0,muzzy_decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
check_integration: tests_integration check_integration_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
stress: tests_stress stress_dir

@ -944,24 +944,54 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
<quote>percpu</quote>. </para></listitem>
</varlistentry>
<varlistentry id="opt.decay_time">
<varlistentry id="opt.dirty_decay_time">
<term>
<mallctl>opt.decay_time</mallctl>
<mallctl>opt.dirty_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused dirty pages until an equivalent set of unused dirty pages is
purged and/or reused. The pages are incrementally purged according to a
sigmoidal decay curve that starts and ends with zero purge rate. A
decay time of 0 causes all unused dirty pages to be purged immediately
upon creation. A decay time of -1 disables purging. The default decay
time is 10 seconds. See <link
linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
purged (i.e. converted to muzzy via e.g.
<function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>
if supported by the operating system, or converted to clean otherwise)
and/or reused. Dirty pages are defined as previously having been
potentially written to by the application, and therefore consuming
physical memory, yet having no current use. The pages are incrementally
purged according to a sigmoidal decay curve that starts and ends with
zero purge rate. A decay time of 0 causes all unused dirty pages to be
purged immediately upon creation. A decay time of -1 disables purging.
The default decay time is 10 seconds. See <link
linkend="arenas.dirty_decay_time"><mallctl>arenas.dirty_decay_time</mallctl></link>
and <link
linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
for related dynamic control options.
</para></listitem>
linkend="arena.i.muzzy_decay_time"><mallctl>arena.&lt;i&gt;.muzzy_decay_time</mallctl></link>
for related dynamic control options. See <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzzy_decay_time</mallctl></link>
for a description of muzzy pages.</para></listitem>
</varlistentry>
<varlistentry id="opt.muzzy_decay_time">
<term>
<mallctl>opt.muzzy_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused muzzy pages until an equivalent set of unused muzzy pages is
purged (i.e. converted to clean) and/or reused. Muzzy pages are defined
as previously having been unused dirty pages that were subsequently
purged in a manner that left them subject to the reclamation whims of
the operating system (e.g.
<function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>),
and therefore in an indeterminate state. The pages are incrementally
purged according to a sigmoidal decay curve that starts and ends with
zero purge rate. A decay time of 0 causes all unused muzzy pages to be
purged immediately upon creation. A decay time of -1 disables purging.
The default decay time is 10 seconds. See <link
linkend="arenas.muzzy_decay_time"><mallctl>arenas.muzzy_decay_time</mallctl></link>
and <link
linkend="arena.i.muzzy_decay_time"><mallctl>arena.&lt;i&gt;.muzzy_decay_time</mallctl></link>
for related dynamic control options.</para></listitem>
</varlistentry>
<varlistentry id="opt.stats_print">
@ -1460,6 +1490,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
initialized (always true).</para></listitem>
</varlistentry>
<varlistentry id="arena.i.decay">
<term>
<mallctl>arena.&lt;i&gt;.decay</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Trigger decay-based purging of unused dirty/muzzy pages
for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<constant>MALLCTL_ARENAS_ALL</constant>. The proportion of unused
dirty/muzzy pages to be purged depends on the current time; see <link
linkend="opt.dirty_decay_time"><mallctl>opt.dirty_decay_time</mallctl></link>
and <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzy_decay_time</mallctl></link>
for details.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.purge">
<term>
<mallctl>arena.&lt;i&gt;.purge</mallctl>
@ -1471,20 +1517,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
<varlistentry id="arena.i.decay">
<term>
<mallctl>arena.&lt;i&gt;.decay</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Trigger decay-based purging of unused dirty pages for
arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<constant>MALLCTL_ARENAS_ALL</constant>. The proportion of unused dirty
pages to be purged depends on the current time; see <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
details.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.reset">
<term>
<mallctl>arena.&lt;i&gt;.reset</mallctl>
@ -1532,9 +1564,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
settings.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.decay_time">
<varlistentry id="arena.i.dirty_decay_time">
<term>
<mallctl>arena.&lt;i&gt;.decay_time</mallctl>
<mallctl>arena.&lt;i&gt;.dirty_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
@ -1544,8 +1576,24 @@ malloc_conf = "xmalloc:true";]]></programlisting>
set, all currently unused dirty pages are considered to have fully
decayed, which causes immediate purging of all unused dirty pages unless
the decay time is set to -1 (i.e. purging disabled). See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
additional information.</para></listitem>
linkend="opt.dirty_decay_time"><mallctl>opt.dirty_decay_time</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.muzzy_decay_time">
<term>
<mallctl>arena.&lt;i&gt;.muzzy_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current per-arena approximate time in seconds from the
creation of a set of unused muzzy pages until an equivalent set of
unused muzzy pages is purged and/or reused. Each time this interface is
set, all currently unused muzzy pages are considered to have fully
decayed, which causes immediate purging of all unused muzzy pages unless
the decay time is set to -1 (i.e. purging disabled). See <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzzy_decay_time</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.extent_hooks">
@ -1584,7 +1632,7 @@ struct extent_hooks_s {
mapped committed memory, in the simplest case followed by deallocation.
However, there are performance and platform reasons to retain extents
for later reuse. Cleanup attempts cascade from deallocation to decommit
to lazy purging to forced purging, which gives the extent management
to forced purging to lazy purging, which gives the extent management
functions opportunities to reject the most permanent cleanup operations
in favor of less permanent (and often less costly) operations. All
operations except allocation can be universally opted out of by setting
@ -1707,12 +1755,14 @@ struct extent_hooks_s {
<parameter>addr</parameter> and <parameter>size</parameter> at
<parameter>offset</parameter> bytes, extending for
<parameter>length</parameter> on behalf of arena
<parameter>arena_ind</parameter>. A lazy extent purge function can
delay purging indefinitely and leave the pages within the purged virtual
memory range in an indeterminite state, whereas a forced extent purge
function immediately purges, and the pages within the virtual memory
range will be zero-filled the next time they are accessed. If the
function returns true, this indicates failure to purge.</para>
<parameter>arena_ind</parameter>. A lazy extent purge function (e.g.
implemented via
<function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>)
can delay purging indefinitely and leave the pages within the purged
virtual memory range in an indeterminate state, whereas a forced extent
purge function immediately purges, and the pages within the virtual
memory range will be zero-filled the next time they are accessed. If
the function returns true, this indicates failure to purge.</para>
<funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
@ -1769,19 +1819,34 @@ struct extent_hooks_s {
<listitem><para>Current limit on number of arenas.</para></listitem>
</varlistentry>
<varlistentry id="arenas.decay_time">
<varlistentry id="arenas.dirty_decay_time">
<term>
<mallctl>arenas.decay_time</mallctl>
<mallctl>arenas.dirty_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current default per-arena approximate time in seconds
from the creation of a set of unused dirty pages until an equivalent set
of unused dirty pages is purged and/or reused, used to initialize <link
linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
linkend="arena.i.dirty_decay_time"><mallctl>arena.&lt;i&gt;.dirty_decay_time</mallctl></link>
during arena creation. See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
additional information.</para></listitem>
linkend="opt.dirty_decay_time"><mallctl>opt.dirty_decay_time</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arenas.muzzy_decay_time">
<term>
<mallctl>arenas.muzzy_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current default per-arena approximate time in seconds
from the creation of a set of unused muzzy pages until an equivalent set
of unused muzzy pages is purged and/or reused, used to initialize <link
linkend="arena.i.muzzy_decay_time"><mallctl>arena.&lt;i&gt;.muzzy_decay_time</mallctl></link>
during arena creation. See <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzzy_decay_time</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arenas.quantum">
@ -2014,7 +2079,9 @@ struct extent_hooks_s {
equal to <link
linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
This does not include <link linkend="stats.arenas.i.pdirty">
<mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages
<mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>,
<link linkend="stats.arenas.i.pmuzzy">
<mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl></link>, nor pages
entirely devoted to allocator metadata.</para></listitem>
</varlistentry>
@ -2099,16 +2166,29 @@ struct extent_hooks_s {
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.decay_time">
<varlistentry id="stats.arenas.i.dirty_decay_time">
<term>
<mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
<mallctl>stats.arenas.&lt;i&gt;.dirty_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused dirty pages until an equivalent set of unused dirty pages is
purged and/or reused. See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
linkend="opt.dirty_decay_time"><mallctl>opt.dirty_decay_time</mallctl></link>
for details.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.muzzy_decay_time">
<term>
<mallctl>stats.arenas.&lt;i&gt;.muzzy_decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused muzzy pages until an equivalent set of unused muzzy pages is
purged and/or reused. See <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzzy_decay_time</mallctl></link>
for details.</para></listitem>
</varlistentry>
@ -2138,10 +2218,22 @@ struct extent_hooks_s {
<literal>r-</literal>
</term>
<listitem><para>Number of pages within unused extents that are
potentially dirty, and for which
<function>madvise(<parameter>...</parameter>
<parameter><constant>MADV_DONTNEED</constant></parameter>)</function> or
similar has not been called.</para></listitem>
potentially dirty, and for which <function>madvise()</function> or
similar has not been called. See <link
linkend="opt.dirty_decay_time"><mallctl>opt.dirty_decay_time</mallctl></link>
for a description of dirty pages.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.pmuzzy">
<term>
<mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Number of pages within unused extents that are muzzy.
See <link
linkend="opt.muzzy_decay_time"><mallctl>opt.muzzy_decay_time</mallctl></link>
for a description of muzzy pages.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mapped">
@ -2207,9 +2299,9 @@ struct extent_hooks_s {
size.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.npurge">
<varlistentry id="stats.arenas.i.dirty_npurge">
<term>
<mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
<mallctl>stats.arenas.&lt;i&gt;.dirty_npurge</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
@ -2218,26 +2310,57 @@ struct extent_hooks_s {
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.nmadvise">
<varlistentry id="stats.arenas.i.dirty_nmadvise">
<term>
<mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl>
<mallctl>stats.arenas.&lt;i&gt;.dirty_nmadvise</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of <function>madvise(<parameter>...</parameter>
<parameter><constant>MADV_DONTNEED</constant></parameter>)</function> or
similar calls made to purge dirty pages.</para></listitem>
<listitem><para>Number of <function>madvise()</function> or similar
calls made to purge dirty pages.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.purged">
<varlistentry id="stats.arenas.i.dirty_purged">
<term>
<mallctl>stats.arenas.&lt;i&gt;.purged</mallctl>
<mallctl>stats.arenas.&lt;i&gt;.dirty_purged</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of pages purged.</para></listitem>
<listitem><para>Number of dirty pages purged.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.muzzy_npurge">
<term>
<mallctl>stats.arenas.&lt;i&gt;.muzzy_npurge</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of muzzy page purge sweeps performed.
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.muzzy_nmadvise">
<term>
<mallctl>stats.arenas.&lt;i&gt;.muzzy_nmadvise</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of <function>madvise()</function> or similar
calls made to purge muzzy pages.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.muzzy_purged">
<term>
<mallctl>stats.arenas.&lt;i&gt;.muzzy_purged</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of muzzy pages purged.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.allocated">


@ -9,7 +9,8 @@ static const size_t large_pad =
#endif
;
extern ssize_t opt_decay_time;
extern ssize_t opt_dirty_decay_time;
extern ssize_t opt_muzzy_decay_time;
extern const arena_bin_info_t arena_bin_info[NBINS];
@ -22,13 +23,13 @@ void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
size_t size);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *decay_time, size_t *nactive,
size_t *ndirty);
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_time,
ssize_t *muzzy_decay_time, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats);
void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
const char **dss, ssize_t *dirty_decay_time, ssize_t *muzzy_decay_time,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
@ -41,9 +42,13 @@ void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_decay_time_get(arena_t *arena);
bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
ssize_t arena_dirty_decay_time_get(arena_t *arena);
bool arena_dirty_decay_time_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_time);
ssize_t arena_muzzy_decay_time_get(arena_t *arena);
bool arena_muzzy_decay_time_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_time);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool all);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
@ -74,8 +79,10 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
ssize_t arena_dirty_decay_time_default_get(void);
bool arena_dirty_decay_time_default_set(ssize_t decay_time);
ssize_t arena_muzzy_decay_time_default_get(void);
bool arena_muzzy_decay_time_default_set(ssize_t decay_time);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);

@ -75,13 +75,14 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
return;
}
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
arena_purge(tsdn, arena, false);
arena_decay(tsdn, arena, false);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->decay.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
arena_decay_ticks(tsdn, arena, 1);
}

@ -48,10 +48,8 @@ struct arena_decay_s {
* Approximate time in seconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*
* Synchronization: atomic.
*/
ssize_t time;
atomic_zd_t time;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
@ -73,10 +71,10 @@ struct arena_decay_s {
*/
nstime_t deadline;
/*
* Number of dirty pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay.ndirty and
* extents_npages_get(&arena->extents_cached) to determine how many
* dirty pages, if any, were generated.
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
@ -86,6 +84,14 @@ struct arena_decay_s {
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
* Synchronization: Same as associated arena's stats field. */
decay_stats_t *stats;
};
struct arena_bin_s {
@ -194,15 +200,18 @@ struct arena_s {
*
* Synchronization: internal.
*/
extents_t extents_cached;
extents_t extents_dirty;
extents_t extents_muzzy;
extents_t extents_retained;
/*
* Decay-based purging state.
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: internal.
*/
arena_decay_t decay;
arena_decay_t decay_dirty; /* dirty --> muzzy */
arena_decay_t decay_muzzy; /* muzzy --> retained */
/*
* Next extent size class in a growing series to use when satisfying a

@ -7,8 +7,9 @@
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay time in seconds. */
#define DECAY_TIME_DEFAULT 10
/* Default decay times in seconds. */
#define DIRTY_DECAY_TIME_DEFAULT 10
#define MUZZY_DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000

@ -51,9 +51,11 @@ struct ctl_arena_s {
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t decay_time;
ssize_t dirty_decay_time;
ssize_t muzzy_decay_time;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;

@ -21,20 +21,21 @@ bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,

@ -4,7 +4,8 @@
typedef enum {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_retained = 2
extent_state_muzzy = 2,
extent_state_retained = 3
} extent_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */

@ -15,21 +15,26 @@ arena_dalloc_bin_junked_locked
arena_dalloc_junk_small
arena_dalloc_promoted
arena_dalloc_small
arena_decay
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dirty_decay_time_default_get
arena_dirty_decay_time_default_set
arena_dirty_decay_time_get
arena_dirty_decay_time_set
arena_muzzy_decay_time_default_get
arena_muzzy_decay_time_default_set
arena_muzzy_decay_time_get
arena_muzzy_decay_time_set
arena_destroy
arena_dss_prec_get
arena_dss_prec_set
arena_extent_alloc_large
arena_extent_cache_dalloc
arena_extent_dalloc_large_prep
arena_extent_ralloc_large_expand
arena_extent_ralloc_large_shrink
arena_extent_sn_next
arena_extents_dirty_dalloc
arena_get
arena_ichoose
arena_ind_get
@ -59,7 +64,6 @@ arena_prof_promote
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_purge
arena_ralloc
arena_ralloc_no_move
arena_reset
@ -138,7 +142,6 @@ extent_commit_wrapper
extent_committed_get
extent_committed_set
extent_dalloc
extent_dalloc_cache
extent_dalloc_gap
extent_dalloc_mmap
extent_dalloc_wrapper
@ -192,6 +195,8 @@ extent_usize_get
extent_usize_set
extent_zeroed_get
extent_zeroed_set
extents_alloc
extents_dalloc
extents_evict
extents_init
extents_npages_get
@ -299,7 +304,8 @@ nstime_sec
nstime_subtract
nstime_update
opt_abort
opt_decay_time
opt_dirty_decay_time
opt_muzzy_decay_time
opt_dss
opt_junk
opt_junk_alloc

@ -77,6 +77,15 @@ struct malloc_large_stats_s {
size_t curlextents; /* Derived. */
};
struct decay_stats_s {
/* Total number of purge sweeps. */
arena_stats_u64_t npurge;
/* Total number of madvise calls made. */
arena_stats_u64_t nmadvise;
/* Total number of pages purged. */
arena_stats_u64_t purged;
};
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
@ -84,7 +93,7 @@ struct malloc_large_stats_s {
*/
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
malloc_mutex_t mtx;
#endif
/* Number of bytes currently mapped, excluding retained memory. */
@ -98,14 +107,8 @@ struct arena_stats_s {
*/
atomic_zu_t retained; /* Derived. */
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
arena_stats_u64_t npurge;
arena_stats_u64_t nmadvise;
arena_stats_u64_t purged;
decay_stats_t decay_dirty;
decay_stats_t decay_muzzy;
atomic_zu_t base; /* Derived. */
atomic_zu_t internal;

@ -4,6 +4,7 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct decay_stats_s decay_stats_t;
typedef struct arena_stats_s arena_stats_t;
#endif /* JEMALLOC_INTERNAL_STATS_TYPES_H */

@ -13,8 +13,10 @@ const char *percpu_arena_mode_names[] = {
const char *opt_percpu_arena = OPT_PERCPU_ARENA_DEFAULT;
percpu_arena_mode_t percpu_arena_mode = PERCPU_ARENA_MODE_DEFAULT;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
ssize_t opt_dirty_decay_time = DIRTY_DECAY_TIME_DEFAULT;
ssize_t opt_muzzy_decay_time = MUZZY_DECAY_TIME_DEFAULT;
static ssize_t dirty_decay_time_default;
static ssize_t muzzy_decay_time_default;
const arena_bin_info_t arena_bin_info[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
@ -37,12 +39,13 @@ const arena_bin_info_t arena_bin_info[NBINS] = {
* definition.
*/
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents, size_t ndirty_limit);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena,
extent_t *slab, arena_bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
extent_t *slab, arena_bin_t *bin);
static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit);
static void arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
/******************************************************************************/
@ -50,7 +53,7 @@ static bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
assert(((char *)arena_stats)[0] == 0);
assert(((char *)arena_stats)[i] == 0);
}
}
#ifndef JEMALLOC_ATOMIC_U64
@ -187,23 +190,27 @@ arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) {
const char **dss, ssize_t *dirty_decay_time, ssize_t *muzzy_decay_time,
size_t *nactive, size_t *ndirty,
size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena_dss_prec_get(arena)];
*decay_time = arena_decay_time_get(arena);
*dirty_decay_time = arena_dirty_decay_time_get(arena);
*muzzy_decay_time = arena_muzzy_decay_time_get(arena);
*nactive += atomic_read_zu(&arena->nactive);
*ndirty += extents_npages_get(&arena->extents_cached);
*ndirty += extents_npages_get(&arena->extents_dirty);
*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats) {
const char **dss, ssize_t *dirty_decay_time, ssize_t *muzzy_decay_time,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
cassert(config_stats);
arena_basic_stats_merge(tsdn, arena, nthreads, dss, decay_time,
nactive, ndirty);
arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_time,
muzzy_decay_time, nactive, ndirty, nmuzzy);
size_t base_allocated, base_resident, base_mapped;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
@ -215,17 +222,33 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
arena_stats_accum_zu(&astats->retained,
extents_npages_get(&arena->extents_retained) << LG_PAGE);
arena_stats_accum_u64(&astats->npurge, arena_stats_read_u64(tsdn,
&arena->stats, &arena->stats.npurge));
arena_stats_accum_u64(&astats->nmadvise, arena_stats_read_u64(tsdn,
&arena->stats, &arena->stats.nmadvise));
arena_stats_accum_u64(&astats->purged, arena_stats_read_u64(tsdn,
&arena->stats, &arena->stats.purged));
arena_stats_accum_u64(&astats->decay_dirty.npurge,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.npurge));
arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.nmadvise));
arena_stats_accum_u64(&astats->decay_dirty.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.purged));
arena_stats_accum_u64(&astats->decay_muzzy.npurge,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.npurge));
arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.nmadvise));
arena_stats_accum_u64(&astats->decay_muzzy.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.purged));
arena_stats_accum_zu(&astats->base, base_allocated);
arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
arena_stats_accum_zu(&astats->resident, base_resident
+ (((atomic_read_zu(&arena->nactive)
+ extents_npages_get(&arena->extents_cached)) << LG_PAGE)));
arena_stats_accum_zu(&astats->resident, base_resident +
(((atomic_read_zu(&arena->nactive) +
extents_npages_get(&arena->extents_dirty) +
extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
for (szind_t i = 0; i < NSIZES - NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
@ -292,13 +315,14 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
}
void
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
if (arena_decay_time_get(arena) == 0) {
arena_purge(tsdn, arena, true);
extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
extent);
if (arena_dirty_decay_time_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, true);
}
}
@ -432,8 +456,14 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t mapped_add;
bool commit = true;
extent = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL, usize,
large_pad, alignment, zero, &commit, false);
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, large_pad, alignment, zero,
&commit, false);
if (extent == NULL) {
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, usize, large_pad, alignment,
zero, &commit, false);
}
size_t size = usize + large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
@ -507,12 +537,12 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
static ssize_t
arena_decay_time_read(arena_decay_t *decay) {
return atomic_read_zd(&decay->time);
return atomic_load_zd(&decay->time, ATOMIC_RELAXED);
}
static void
arena_decay_time_write(arena_decay_t *decay, ssize_t decay_time) {
atomic_write_zd(&decay->time, decay_time);
atomic_store_zd(&decay->time, decay_time, ATOMIC_RELAXED);
}
static void
@ -621,10 +651,11 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, extents_t *extents,
static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents) {
size_t ndirty_limit = arena_decay_backlog_npages_limit(decay);
size_t npages_limit = arena_decay_backlog_npages_limit(decay);
if (extents_npages_get(extents) > ndirty_limit) {
arena_purge_to_limit(tsdn, arena, decay, extents, ndirty_limit);
if (extents_npages_get(extents) > npages_limit) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
npages_limit);
}
/*
* There may be concurrent ndirty fluctuation between the purge above
@ -659,12 +690,22 @@ arena_decay_reinit(arena_decay_t *decay, extents_t *extents,
}
static bool
arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_time) {
arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_time,
decay_stats_t *stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
assert(((char *)decay)[i] == 0);
}
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY)) {
return true;
}
decay->purging = false;
arena_decay_reinit(decay, extents, decay_time);
/* Memory is zeroed, so there is no need to clear stats. */
if (config_stats) {
decay->stats = stats;
}
return false;
}
@ -680,7 +721,7 @@ arena_decay_time_valid(ssize_t decay_time) {
}
static void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents) {
malloc_mutex_assert_owner(tsdn, &decay->mtx);
@ -688,7 +729,8 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
ssize_t decay_time = arena_decay_time_read(decay);
if (decay_time <= 0) {
if (decay_time == 0) {
arena_purge_to_limit(tsdn, arena, decay, extents, 0);
arena_decay_to_limit(tsdn, arena, decay, extents, false,
0);
}
return;
}
@ -725,18 +767,29 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
}
}
ssize_t
arena_decay_time_get(arena_t *arena) {
return arena_decay_time_read(&arena->decay);
static ssize_t
arena_decay_time_get(arena_decay_t *decay) {
return arena_decay_time_read(decay);
}
bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
ssize_t
arena_dirty_decay_time_get(arena_t *arena) {
return arena_decay_time_get(&arena->decay_dirty);
}
ssize_t
arena_muzzy_decay_time_get(arena_t *arena) {
return arena_decay_time_get(&arena->decay_muzzy);
}
static bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, ssize_t decay_time) {
if (!arena_decay_time_valid(decay_time)) {
return true;
}
malloc_mutex_lock(tsdn, &arena->decay.mtx);
malloc_mutex_lock(tsdn, &decay->mtx);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
@ -745,58 +798,100 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_reinit(&arena->decay, &arena->extents_cached, decay_time);
arena_maybe_purge(tsdn, arena, &arena->decay, &arena->extents_cached);
malloc_mutex_unlock(tsdn, &arena->decay.mtx);
arena_decay_reinit(decay, extents, decay_time);
arena_maybe_decay(tsdn, arena, decay, extents);
malloc_mutex_unlock(tsdn, &decay->mtx);
return false;
}
bool
arena_dirty_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
return arena_decay_time_set(tsdn, arena, &arena->decay_dirty,
&arena->extents_dirty, decay_time);
}
bool
arena_muzzy_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
return arena_decay_time_set(tsdn, arena, &arena->decay_muzzy,
&arena->extents_muzzy, decay_time);
}
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, size_t ndirty_limit, extent_list_t *purge_extents) {
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
extent_list_t *decay_extents) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
/* Stash extents according to ndirty_limit. */
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
extent_t *extent;
while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
ndirty_limit)) != NULL) {
extent_list_append(purge_extents, extent);
npages_limit)) != NULL) {
extent_list_append(decay_extents, extent);
nstashed += extent_size_get(extent) >> LG_PAGE;
}
return nstashed;
}
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_list_t *purge_extents) {
UNUSED size_t nmadvise;
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
bool all, extent_list_t *decay_extents) {
UNUSED size_t nmadvise, nunmapped;
size_t npurged;
if (config_stats) {
nmadvise = 0;
nunmapped = 0;
}
npurged = 0;
for (extent_t *extent = extent_list_first(purge_extents); extent !=
NULL; extent = extent_list_first(purge_extents)) {
ssize_t muzzy_decay_time = arena_muzzy_decay_time_get(arena);
for (extent_t *extent = extent_list_first(decay_extents); extent !=
NULL; extent = extent_list_first(decay_extents)) {
if (config_stats) {
nmadvise++;
}
npurged += extent_size_get(extent) >> LG_PAGE;
extent_list_remove(purge_extents, extent);
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
size_t npages = extent_size_get(extent) >> LG_PAGE;
npurged += npages;
extent_list_remove(decay_extents, extent);
switch (extents_state_get(extents)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (!all && muzzy_decay_time != 0 &&
!extent_purge_lazy_wrapper(tsdn, arena,
r_extent_hooks, extent, 0,
extent_size_get(extent))) {
extents_dalloc(tsdn, arena, r_extent_hooks,
&arena->extents_muzzy, extent);
break;
}
/* Fall through. */
case extent_state_muzzy:
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
extent);
if (config_stats) {
nunmapped += npages;
}
break;
case extent_state_retained:
default:
not_reached();
}
}
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.nmadvise,
nmadvise);
arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.purged,
arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
1);
arena_stats_add_u64(tsdn, &arena->stats,
&decay->stats->nmadvise, nmadvise);
arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
npurged);
arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
npurged << LG_PAGE);
nunmapped);
arena_stats_unlock(tsdn, &arena->stats);
}
@ -804,12 +899,12 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
}
/*
* ndirty_limit: Purge as many dirty extents as possible without violating the
* invariant: (extents_npages_get(extents) >= ndirty_limit)
* npages_limit: Decay as many dirty extents as possible without violating the
* invariant: (extents_npages_get(extents) >= npages_limit)
*/
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, size_t ndirty_limit) {
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, bool all, size_t npages_limit) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
malloc_mutex_assert_owner(tsdn, &decay->mtx);
@ -817,49 +912,53 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
return;
}
decay->purging = true;
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t npurge, npurged;
extent_list_t purge_extents;
extent_list_init(&purge_extents);
malloc_mutex_unlock(tsdn, &decay->mtx);
npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, extents,
ndirty_limit, &purge_extents);
if (npurge == 0) {
malloc_mutex_lock(tsdn, &decay->mtx);
goto label_return;
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
extent_list_t decay_extents;
extent_list_init(&decay_extents);
size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
npages_limit, &decay_extents);
if (npurge != 0) {
UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
&extent_hooks, decay, extents, all, &decay_extents);
assert(npurged == npurge);
}
npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
&purge_extents);
assert(npurged == npurge);
malloc_mutex_lock(tsdn, &decay->mtx);
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.npurge,
1);
arena_stats_unlock(tsdn, &arena->stats);
}
label_return:
decay->purging = false;
}
void
arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) {
malloc_mutex_lock(tsdn, &arena->decay.mtx);
static void
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, bool all) {
malloc_mutex_lock(tsdn, &decay->mtx);
if (all) {
arena_purge_to_limit(tsdn, arena, &arena->decay,
&arena->extents_cached, 0);
arena_decay_to_limit(tsdn, arena, decay, extents, all, 0);
} else {
arena_maybe_purge(tsdn, arena, &arena->decay,
&arena->extents_cached);
arena_maybe_decay(tsdn, arena, decay, extents);
}
malloc_mutex_unlock(tsdn, &arena->decay.mtx);
malloc_mutex_unlock(tsdn, &decay->mtx);
}
static void
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool all) {
arena_decay_impl(tsdn, arena, &arena->decay_dirty,
&arena->extents_dirty, all);
}
static void
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool all) {
arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
&arena->extents_muzzy, all);
}
void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool all) {
arena_decay_dirty(tsdn, arena, all);
arena_decay_muzzy(tsdn, arena, all);
}
static void
@ -867,7 +966,7 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, slab);
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}
static void
@ -1008,7 +1107,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
* extents, so only retained extents may remain.
*/
assert(extents_npages_get(&arena->extents_cached) == 0);
assert(extents_npages_get(&arena->extents_dirty) == 0);
/* Attempt to deallocate retained memory. */
arena_destroy_retained(tsd_tsdn(tsd), arena);
@ -1061,8 +1160,14 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
bool zero = false;
bool commit = true;
extent_t *slab = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, &zero,
&commit, true);
if (slab == NULL) {
slab = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
&zero, &commit, true);
}
if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
bin_info);
@ -1622,16 +1727,32 @@ arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
}
ssize_t
arena_decay_time_default_get(void) {
return (ssize_t)atomic_read_zu((size_t *)&decay_time_default);
arena_dirty_decay_time_default_get(void) {
return (ssize_t)atomic_read_zu((size_t *)&dirty_decay_time_default);
}
bool
arena_decay_time_default_set(ssize_t decay_time) {
arena_dirty_decay_time_default_set(ssize_t decay_time) {
if (!arena_decay_time_valid(decay_time)) {
return true;
}
atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time);
atomic_write_zu((size_t *)&dirty_decay_time_default,
(size_t)decay_time);
return false;
}
ssize_t
arena_muzzy_decay_time_default_get(void) {
return (ssize_t)atomic_read_zu((size_t *)&muzzy_decay_time_default);
}
bool
arena_muzzy_decay_time_default_set(ssize_t decay_time) {
if (!arena_decay_time_valid(decay_time)) {
return true;
}
atomic_write_zu((size_t *)&muzzy_decay_time_default,
(size_t)decay_time);
return false;
}
@ -1723,28 +1844,40 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
/*
* Delay coalescing for cached extents despite the disruptive effect on
* Delay coalescing for dirty extents despite the disruptive effect on
* memory layout for best-fit extent allocation, since cached extents
* are likely to be reused soon after deallocation, and the cost of
* merging/splitting extents is non-trivial.
*/
if (extents_init(tsdn, &arena->extents_cached, extent_state_dirty,
if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
true)) {
goto label_error;
}
/*
* Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents.
*/
if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
false)) {
goto label_error;
}
/*
* Coalesce retained extents immediately, in part because they will
* never be evicted (and therefore there's no opportunity for delayed
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if (extents_init(tsdn, &arena->extents_retained,
extent_state_retained, false)) {
if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
false)) {
goto label_error;
}
if (arena_decay_init(&arena->decay, &arena->extents_cached,
arena_decay_time_default_get())) {
if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
arena_dirty_decay_time_default_get(), &arena->stats.decay_dirty)) {
goto label_error;
}
if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
arena_muzzy_decay_time_default_get(), &arena->stats.decay_muzzy)) {
goto label_error;
}
@ -1785,12 +1918,14 @@ label_error:
void
arena_boot(void) {
arena_decay_time_default_set(opt_decay_time);
arena_dirty_decay_time_default_set(opt_dirty_decay_time);
arena_muzzy_decay_time_default_set(opt_muzzy_decay_time);
}
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->decay.mtx);
malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}
void
@ -1802,7 +1937,8 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
extents_prefork(tsdn, &arena->extents_cached);
extents_prefork(tsdn, &arena->extents_dirty);
extents_prefork(tsdn, &arena->extents_muzzy);
extents_prefork(tsdn, &arena->extents_retained);
}
@ -1838,9 +1974,11 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_freelist_mtx);
extents_postfork_parent(tsdn, &arena->extents_cached);
extents_postfork_parent(tsdn, &arena->extents_dirty);
extents_postfork_parent(tsdn, &arena->extents_muzzy);
extents_postfork_parent(tsdn, &arena->extents_retained);
malloc_mutex_postfork_parent(tsdn, &arena->decay.mtx);
malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
if (config_stats && config_tcache) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
@ -1856,9 +1994,11 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_freelist_mtx);
extents_postfork_child(tsdn, &arena->extents_cached);
extents_postfork_child(tsdn, &arena->extents_dirty);
extents_postfork_child(tsdn, &arena->extents_muzzy);
extents_postfork_child(tsdn, &arena->extents_retained);
malloc_mutex_postfork_child(tsdn, &arena->decay.mtx);
malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
if (config_stats && config_tcache) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}

src/ctl.c

@ -73,7 +73,8 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_dirty_decay_time)
CTL_PROTO(opt_muzzy_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
@ -95,12 +96,13 @@ CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_dirty_decay_time)
CTL_PROTO(arena_i_muzzy_decay_time)
CTL_PROTO(arena_i_extent_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
@ -110,7 +112,8 @@ INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_dirty_decay_time)
CTL_PROTO(arenas_muzzy_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
@ -150,14 +153,19 @@ CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_dirty_decay_time)
CTL_PROTO(stats_arenas_i_muzzy_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_tcache_bytes)
@ -231,7 +239,8 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("decay_time"), CTL(opt_decay_time)},
{NAME("dirty_decay_time"), CTL(opt_dirty_decay_time)},
{NAME("muzzy_decay_time"), CTL(opt_muzzy_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
@ -259,12 +268,13 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("initialized"), CTL(arena_i_initialized)},
{NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("purge"), CTL(arena_i_purge)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("dirty_decay_time"), CTL(arena_i_dirty_decay_time)},
{NAME("muzzy_decay_time"), CTL(arena_i_muzzy_decay_time)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
@ -301,7 +311,8 @@ static const ctl_indexed_node_t arenas_lextent_node[] = {
static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("dirty_decay_time"), CTL(arenas_dirty_decay_time)},
{NAME("muzzy_decay_time"), CTL(arenas_muzzy_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
@ -373,14 +384,19 @@ static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("dirty_decay_time"), CTL(stats_arenas_i_dirty_decay_time)},
{NAME("muzzy_decay_time"), CTL(stats_arenas_i_muzzy_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
{NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
{NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
{NAME("base"), CTL(stats_arenas_i_base)},
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
@ -554,9 +570,11 @@ static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->nthreads = 0;
ctl_arena->dss = dss_prec_names[dss_prec_limit];
ctl_arena->decay_time = -1;
ctl_arena->dirty_decay_time = -1;
ctl_arena->muzzy_decay_time = -1;
ctl_arena->pactive = 0;
ctl_arena->pdirty = 0;
ctl_arena->pmuzzy = 0;
if (config_stats) {
memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
ctl_arena->astats->allocated_small = 0;
@ -576,8 +594,9 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
if (config_stats) {
arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
&ctl_arena->dss, &ctl_arena->decay_time,
&ctl_arena->pactive, &ctl_arena->pdirty,
&ctl_arena->dss, &ctl_arena->dirty_decay_time,
&ctl_arena->muzzy_decay_time, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
ctl_arena->astats->lstats);
@ -594,8 +613,9 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
}
} else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
&ctl_arena->dss, &ctl_arena->decay_time,
&ctl_arena->pactive, &ctl_arena->pdirty);
&ctl_arena->dss, &ctl_arena->dirty_decay_time,
&ctl_arena->muzzy_decay_time, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy);
}
}
@ -608,10 +628,12 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_sdarena->nthreads += ctl_arena->nthreads;
ctl_sdarena->pactive += ctl_arena->pactive;
ctl_sdarena->pdirty += ctl_arena->pdirty;
ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
} else {
assert(ctl_arena->nthreads == 0);
assert(ctl_arena->pactive == 0);
assert(ctl_arena->pdirty == 0);
assert(ctl_arena->pmuzzy == 0);
}
if (config_stats) {
@ -624,12 +646,20 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
}
accum_arena_stats_u64(&sdstats->astats.npurge,
&astats->astats.npurge);
accum_arena_stats_u64(&sdstats->astats.nmadvise,
&astats->astats.nmadvise);
accum_arena_stats_u64(&sdstats->astats.purged,
&astats->astats.purged);
accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
&astats->astats.decay_dirty.npurge);
accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
&astats->astats.decay_dirty.nmadvise);
accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
&astats->astats.decay_dirty.purged);
accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
&astats->astats.decay_muzzy.npurge);
accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
&astats->astats.decay_muzzy.nmadvise);
accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
&astats->astats.decay_muzzy.purged);
if (!destroyed) {
accum_atomic_zu(&sdstats->astats.base,
@ -1340,7 +1370,8 @@ CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, opt_percpu_arena, const char *)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_dirty_decay_time, opt_dirty_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_time, opt_muzzy_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
@ -1630,7 +1661,7 @@ label_return:
}
static void
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) {
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
malloc_mutex_lock(tsdn, &ctl_mtx);
{
unsigned narenas = ctl_arenas->narenas;
@ -1655,7 +1686,7 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) {
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL) {
arena_purge(tsdn, tarenas[i], all);
arena_decay(tsdn, tarenas[i], all);
}
}
} else {
@ -1669,28 +1700,12 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) {
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL) {
arena_purge(tsdn, tarena, all);
arena_decay(tsdn, tarena, all);
}
}
}
}
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
READONLY();
WRITEONLY();
MIB_UNSIGNED(arena_ind, 1);
arena_i_purge(tsd_tsdn(tsd), arena_ind, true);
ret = 0;
label_return:
return ret;
}
static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
@ -1700,7 +1715,23 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READONLY();
WRITEONLY();
MIB_UNSIGNED(arena_ind, 1);
arena_i_purge(tsd_tsdn(tsd), arena_ind, false);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
ret = 0;
label_return:
return ret;
}
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
READONLY();
WRITEONLY();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
ret = 0;
label_return:
@ -1773,7 +1804,7 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Merge stats after resetting and purging arena. */
arena_reset(tsd, arena);
arena_purge(tsd_tsdn(tsd), arena, true);
arena_decay(tsd_tsdn(tsd), arena, true);
ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
ctl_darena->initialized = true;
ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
@ -1852,8 +1883,8 @@ label_return:
}
static int
arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
arena_i_decay_time_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
unsigned arena_ind;
arena_t *arena;
@ -1866,7 +1897,8 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(arena);
size_t oldval = dirty ? arena_dirty_decay_time_get(arena) :
arena_muzzy_decay_time_get(arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@ -1874,7 +1906,9 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_set(tsd_tsdn(tsd), arena,
if (dirty ? arena_dirty_decay_time_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp) :
arena_muzzy_decay_time_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
@ -1886,6 +1920,20 @@ label_return:
return ret;
}
static int
arena_i_dirty_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arena_i_decay_time_ctl_impl(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, true);
}
static int
arena_i_muzzy_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arena_i_decay_time_ctl_impl(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, false);
}
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
@ -1967,12 +2015,13 @@ label_return:
}
static int
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
arenas_decay_time_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_default_get();
size_t oldval = (dirty ? arena_dirty_decay_time_default_get() :
arena_muzzy_decay_time_default_get());
READ(oldval, ssize_t);
}
if (newp != NULL) {
@ -1980,7 +2029,8 @@ arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
if (dirty ? arena_dirty_decay_time_default_set(*(ssize_t *)newp)
: arena_muzzy_decay_time_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@ -1991,6 +2041,20 @@ label_return:
return ret;
}
static int
arenas_dirty_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arenas_decay_time_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, true);
}
static int
arenas_muzzy_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return arenas_decay_time_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
newlen, false);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@ -2182,23 +2246,41 @@ CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_decay_time, arenas_i(mib[2])->decay_time,
CTL_RO_GEN(stats_arenas_i_dirty_decay_time, arenas_i(mib[2])->dirty_decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_time, arenas_i(mib[2])->muzzy_decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
size_t)
@ -2268,8 +2350,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
arena_stats_read_u64(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
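For code that drives these controls, the renames are mechanical: the same mallctl() entry points are used, only the names from the tables above change. A minimal sketch, assuming the standard <jemalloc/jemalloc.h> public header; the arena index, the new decay value, and the helper name are arbitrary illustrations, not part of this change:

#include <inttypes.h>
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/*
 * Sketch: read and update one arena's dirty decay time via the renamed
 * arena.<i>.dirty_decay_time control, then read the split purge counters.
 */
static void
inspect_arena_decay(unsigned arena_ind, ssize_t new_dirty_decay_time) {
	char name[128];
	ssize_t old_time;
	uint64_t epoch = 1, dirty_npurge = 0, muzzy_npurge = 0;
	size_t sz = sizeof(old_time);

	/* arena.<i>.dirty_decay_time replaces arena.<i>.decay_time. */
	snprintf(name, sizeof(name), "arena.%u.dirty_decay_time", arena_ind);
	if (mallctl(name, (void *)&old_time, &sz,
	    (void *)&new_dirty_decay_time, sizeof(new_dirty_decay_time)) == 0) {
		printf("dirty_decay_time: %zd -> %zd\n", old_time,
		    new_dirty_decay_time);
	}

	/* Refresh stats, then read the split dirty/muzzy purge counters. */
	sz = sizeof(epoch);
	(void)mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
	    sizeof(epoch));
	sz = sizeof(uint64_t);
	snprintf(name, sizeof(name), "stats.arenas.%u.dirty_npurge", arena_ind);
	(void)mallctl(name, (void *)&dirty_npurge, &sz, NULL, 0);
	snprintf(name, sizeof(name), "stats.arenas.%u.muzzy_npurge", arena_ind);
	(void)mallctl(name, (void *)&muzzy_npurge, &sz, NULL, 0);
	printf("npurge: dirty=%" PRIu64 " muzzy=%" PRIu64 "\n", dirty_npurge,
	    muzzy_npurge);
}

The muzzy controls (arena.<i>.muzzy_decay_time, stats.arenas.<i>.muzzy_*) are driven the same way.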


@ -69,6 +69,10 @@ static size_t highpages;
*/
static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced);
@ -293,6 +297,31 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
return false;
}
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
assert(usize + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
usize, pad, alignment, zero, commit, slab);
}
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
extent_zeroed_set(extent, false);
extent_record(tsdn, arena, r_extent_hooks, extents, extent);
}
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, size_t npages_min) {
@ -340,7 +369,10 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* concurrent operations.
*/
switch (extents_state_get(extents)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
extent_state_set(extent, extent_state_active);
break;
case extent_state_retained:
@ -813,19 +845,6 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
assert(usize + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
return extent_recycle(tsdn, arena, r_extent_hooks,
&arena->extents_cached, new_addr, usize, pad, alignment, zero,
commit, slab);
}
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit) {
@ -1206,7 +1225,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
assert(extents_state_get(extents) != extent_state_dirty ||
assert((extents_state_get(extents) != extent_state_dirty &&
extents_state_get(extents) != extent_state_muzzy) ||
!extent_zeroed_get(extent));
malloc_mutex_lock(tsdn, &extents->mtx);
@ -1244,20 +1264,6 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}
void
extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
extent_zeroed_set(extent, false);
extent_record(tsdn, arena, r_extent_hooks, &arena->extents_cached,
extent);
}
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
@ -1327,16 +1333,17 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent))) {
zeroed = true;
} else if ((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena))) {
zeroed = false;
} else if ((*r_extent_hooks)->purge_forced != NULL &&
!(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena))) {
zeroed = true;
} else if (extent_state_get(extent) == extent_state_muzzy ||
((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena)))) {
zeroed = false;
} else {
zeroed = false;
}
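The call sites above also pin down the contract a custom extent hook has to honor when the application supplies its own lazy purging. A minimal sketch, assuming extent_hooks_t comes from the public jemalloc header and that madvise(MADV_FREE) is the lazy-purge primitive; the hook name is hypothetical, and the return convention (false on success) is inferred from the call sites above:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

/*
 * Sketch of a user-supplied lazy-purge hook with the shape used above:
 * false means the pages were purged lazily, true means the hook declined
 * and the caller must fall back (forced purge, decommit, or unmap).
 */
static bool
my_extent_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	(void)extent_hooks; (void)size; (void)arena_ind;
#ifdef MADV_FREE
	/*
	 * Discard contents lazily; the mapping stays intact and the kernel
	 * reclaims the physical pages when it needs them.
	 */
	return madvise((char *)addr + offset, length, MADV_FREE) != 0;
#else
	/* No lazy purging on this system; decline so the fallback runs. */
	(void)addr; (void)offset; (void)length;
	return true;
#endif
}

Such a hook would be installed through the arena.<i>.extent_hooks control listed in the arena_i_node table earlier.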


@ -1064,8 +1064,10 @@ malloc_conf_init(void) {
}
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
UINT_MAX, yes, no, false)
CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
NSTIME_SEC_MAX);
CONF_HANDLE_SSIZE_T(opt_dirty_decay_time,
"dirty_decay_time", -1, NSTIME_SEC_MAX);
CONF_HANDLE_SSIZE_T(opt_muzzy_decay_time,
"muzzy_decay_time", -1, NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
if (CONF_MATCH("junk")) {
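Since each phase now has its own option, the two decay times can be tuned independently at startup. A small illustration, assuming an unprefixed jemalloc build where the application-provided malloc_conf string is consulted by malloc_conf_init() above; the 30-second values are arbitrary, and the same option strings work through the MALLOC_CONF environment variable used by the test scripts further down:

/*
 * Illustration only: a negative value disables the corresponding phase,
 * 0 purges immediately; 30 s is an arbitrary choice for both phases here.
 */
const char *malloc_conf = "dirty_decay_time:30,muzzy_decay_time:30";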


@ -125,7 +125,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
extent_usize_get(trail));
}
arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, trail);
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
}
arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
@ -158,9 +158,16 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool commit = true;
extent_t *trail;
bool new_mapping;
if ((trail = extent_alloc_cache(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE, &is_zeroed_trail,
&commit, false)) == NULL) {
if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, extent_past_get(extent), trailsize, 0,
CACHELINE, &is_zeroed_trail, &commit, false)) != NULL
|| (trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
CACHELINE, &is_zeroed_trail, &commit, false)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL) {
@ -169,10 +176,6 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
if (config_stats) {
new_mapping = true;
}
} else {
if (config_stats) {
new_mapping = false;
}
}
if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
@ -327,7 +330,7 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, extent);
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}
void


@ -259,10 +259,11 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, unsigned i, bool bins, bool large) {
unsigned nthreads;
const char *dss;
ssize_t decay_time;
size_t page, pactive, pdirty, mapped, retained;
ssize_t dirty_decay_time, muzzy_decay_time;
size_t page, pactive, pdirty, pmuzzy, mapped, retained;
size_t base, internal, resident;
uint64_t npurge, nmadvise, purged;
uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
@ -289,39 +290,70 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"dss allocation precedence: %s\n", dss);
}
CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"decay_time\": %zd,\n", decay_time);
} else {
if (decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
decay_time);
} else {
malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
}
}
CTL_M2_GET("stats.arenas.0.dirty_decay_time", i, &dirty_decay_time,
ssize_t);
CTL_M2_GET("stats.arenas.0.muzzy_decay_time", i, &muzzy_decay_time,
ssize_t);
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
uint64_t);
CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
uint64_t);
CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"dirty_decay_time\": %zd,\n", dirty_decay_time);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"muzzy_decay_time\": %zd,\n", muzzy_decay_time);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pactive\": %zu,\n", pactive);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pdirty\": %zu,\n", pdirty);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
"\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
"\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"purged\": %"FMTu64",\n", purged);
"\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged);
} else {
malloc_cprintf(write_cb, cbopaque,
"purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
"decaying: time npages sweeps madvises"
" purged\n");
if (dirty_decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque,
" dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
FMTu64"\n", dirty_decay_time, pdirty, dirty_npurge,
dirty_nmadvise, dirty_purged);
} else {
malloc_cprintf(write_cb, cbopaque,
" dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise,
dirty_purged);
}
if (muzzy_decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque,
" muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
FMTu64"\n", muzzy_decay_time, pmuzzy, muzzy_npurge,
muzzy_nmadvise, muzzy_purged);
} else {
malloc_cprintf(write_cb, cbopaque,
" muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise,
muzzy_purged);
}
}
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
@ -622,7 +654,10 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_CHAR_P(dss, ",")
OPT_WRITE_UNSIGNED(narenas, ",")
OPT_WRITE_CHAR_P(percpu_arena, ",")
OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_time, arenas.dirty_decay_time,
",")
OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_time, arenas.muzzy_decay_time,
",")
OPT_WRITE_CHAR_P(junk, ",")
OPT_WRITE_BOOL(zero, ",")
OPT_WRITE_BOOL(utrace, ",")
@ -670,16 +705,26 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
}
CTL_GET("arenas.decay_time", &ssv, ssize_t);
CTL_GET("arenas.dirty_decay_time", &ssv, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"decay_time\": %zd,\n", ssv);
"\t\t\t\"dirty_decay_time\": %zd,\n", ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Unused dirty page decay time: %zd%s\n", ssv, (ssv < 0) ?
" (no decay)" : "");
}
CTL_GET("arenas.muzzy_decay_time", &ssv, ssize_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"muzzy_decay_time\": %zd,\n", ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Unused muzzy page decay time: %zd%s\n", ssv, (ssv < 0) ?
" (no decay)" : "");
}
CTL_GET("arenas.quantum", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
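The reworked per-arena table and the new JSON keys are easiest to eyeball via malloc_stats_print(); a throwaway sketch (the "J" character in the options string selects JSON output, per the existing malloc_stats_print() conventions):

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Make some allocations so the decay counters are non-trivial. */
	for (int i = 0; i < 1000; i++) {
		free(malloc(4096));
	}

	/* Human-readable form, including the per-arena "decaying:" table. */
	malloc_stats_print(NULL, NULL, NULL);
	/* JSON form, including the dirty_/muzzy_ decay times and counters. */
	malloc_stats_print(NULL, NULL, "J");
	return 0;
}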


@ -22,18 +22,28 @@ nstime_update_mock(nstime_t *time) {
}
static unsigned
do_arena_create(ssize_t decay_time) {
do_arena_create(ssize_t dirty_decay_time, ssize_t muzzy_decay_time) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.decay_time", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlnametomib("arena.0.dirty_decay_time", mib, &miblen),
0, "Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&decay_time,
sizeof(decay_time)), 0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
(void *)&dirty_decay_time,
sizeof(dirty_decay_time)), 0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_time", mib, &miblen),
0, "Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
(void *)&muzzy_decay_time,
sizeof(muzzy_decay_time)), 0, "Unexpected mallctlbymib() failure");
return arena_ind;
}
@ -78,11 +88,10 @@ do_decay(unsigned arena_ind) {
}
static uint64_t
get_arena_npurge(unsigned arena_ind) {
do_epoch();
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("stats.arenas.0.npurge", mib, &miblen), 0,
assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[2] = (size_t)arena_ind;
uint64_t npurge = 0;
@ -92,6 +101,25 @@ get_arena_npurge(unsigned arena_ind) {
return npurge;
}
static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
do_epoch();
return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}
static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
do_epoch();
return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}
static uint64_t
get_arena_npurge(unsigned arena_ind) {
do_epoch();
return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}
static size_t
get_arena_pdirty(unsigned arena_ind) {
do_epoch();
@ -107,6 +135,21 @@ get_arena_pdirty(unsigned arena_ind) {
return pdirty;
}
static size_t
get_arena_pmuzzy(unsigned arena_ind) {
do_epoch();
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[2] = (size_t)arena_ind;
size_t pmuzzy;
size_t sz = sizeof(pmuzzy);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
return pmuzzy;
}
static void *
do_mallocx(size_t size, int flags) {
void *p = mallocx(size, flags);
@ -133,7 +176,7 @@ TEST_BEGIN(test_decay_ticks) {
int err;
/* Set up a manually managed arena for test. */
arena_ind = do_arena_create(0);
arena_ind = do_arena_create(0, 0);
/* Migrate to the new arena, and get the ticker. */
unsigned old_arena_ind;
@ -317,19 +360,66 @@ TEST_BEGIN(test_decay_ticks) {
}
TEST_END
TEST_BEGIN(test_decay_ticker) {
#define NPS 1024
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
ssize_t dt = opt_decay_time;
unsigned arena_ind = do_arena_create(dt);
nstime_t time, update_interval, decay_time, deadline;
nstime_init(&time, 0);
nstime_update(&time);
nstime_init2(&decay_time, dt, 0);
nstime_copy(&deadline, &time);
nstime_add(&deadline, &decay_time);
nstime_init2(&update_interval, dt, 0);
nstime_idivide(&update_interval, NINTERVALS);
/*
* Keep q's slab from being deallocated during the looping below. If a
* cached slab were to repeatedly come and go during looping, it could
* prevent the decay backlog ever becoming empty.
*/
void *p = do_mallocx(1, flags);
uint64_t dirty_npurge1, muzzy_npurge1;
do {
for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
i++) {
void *q = do_mallocx(1, flags);
dallocx(q, flags);
}
dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
nstime_add(&time_mock, &update_interval);
nstime_update(&time);
} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
!terminate_asap));
dallocx(p, flags);
if (config_stats) {
assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
muzzy_npurge0, "Expected purging to occur");
}
#undef NINTERVALS
}
TEST_BEGIN(test_decay_ticker) {
#define NPS 2048
ssize_t ddt = opt_dirty_decay_time;
ssize_t mdt = opt_muzzy_decay_time;
unsigned arena_ind = do_arena_create(ddt, mdt);
int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
void *ps[NPS];
size_t large;
/*
* Allocate a bunch of large objects, pause the clock, deallocate the
* objects, restore the clock, then [md]allocx() in a tight loop while
* advancing time rapidly to verify the ticker triggers purging.
* Allocate a bunch of large objects, pause the clock, deallocate every
* other object (to fragment virtual memory), restore the clock, then
* [md]allocx() in a tight loop while advancing time rapidly to verify
* the ticker triggers purging.
*/
if (config_tcache) {
@ -346,7 +436,8 @@ TEST_BEGIN(test_decay_ticker) {
}
do_purge(arena_ind);
uint64_t npurge0 = get_arena_npurge(arena_ind);
uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
for (unsigned i = 0; i < NPS; i++) {
ps[i] = do_mallocx(large, flags);
@ -362,7 +453,7 @@ TEST_BEGIN(test_decay_ticker) {
nstime_monotonic = nstime_monotonic_mock;
nstime_update = nstime_update_mock;
for (unsigned i = 0; i < NPS; i++) {
for (unsigned i = 0; i < NPS; i += 2) {
dallocx(ps[i], flags);
unsigned nupdates0 = nupdates_mock;
do_decay(arena_ind);
@ -370,51 +461,16 @@ TEST_BEGIN(test_decay_ticker) {
"Expected nstime_update() to be called");
}
nstime_t time, update_interval, decay_time, deadline;
decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
muzzy_npurge0, true);
decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
muzzy_npurge0, false);
nstime_init(&time, 0);
nstime_update(&time);
nstime_init2(&decay_time, dt, 0);
nstime_copy(&deadline, &time);
nstime_add(&deadline, &decay_time);
nstime_init2(&update_interval, dt, 0);
nstime_idivide(&update_interval, NINTERVALS);
nstime_init2(&decay_time, dt, 0);
nstime_copy(&deadline, &time);
nstime_add(&deadline, &decay_time);
/*
* Keep q's slab from being deallocated during the looping below. If
* a cached slab were to repeatedly come and go during looping, it could
* prevent the decay backlog ever becoming empty.
*/
void *p = do_mallocx(1, flags);
uint64_t npurge1;
do {
for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
void *q = do_mallocx(1, flags);
dallocx(q, flags);
}
npurge1 = get_arena_npurge(arena_ind);
nstime_add(&time_mock, &update_interval);
nstime_update(&time);
} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
dallocx(p, flags);
do_arena_destroy(arena_ind);
nstime_monotonic = nstime_monotonic_orig;
nstime_update = nstime_update_orig;
if (config_stats) {
assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
}
do_arena_destroy(arena_ind);
#undef NPS
#undef NINTERVALS
}
TEST_END
@ -435,8 +491,7 @@ TEST_BEGIN(test_decay_nonmonotonic) {
"Unexpected mallctl failure");
do_epoch();
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
npurge0 = get_arena_npurge(0);
nupdates_mock = 0;
nstime_init(&time_mock, 0);
@ -464,8 +519,7 @@ TEST_BEGIN(test_decay_nonmonotonic) {
do_epoch();
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
npurge1 = get_arena_npurge(0);
if (config_stats) {
assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
@ -478,24 +532,28 @@ TEST_BEGIN(test_decay_nonmonotonic) {
TEST_END
TEST_BEGIN(test_decay_now) {
unsigned arena_ind = do_arena_create(0);
unsigned arena_ind = do_arena_create(0, 0);
assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
/* Verify that dirty pages never linger after deallocation. */
/* Verify that dirty/muzzy pages never linger after deallocation. */
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
size_t size = sizes[i];
generate_dirty(arena_ind, size);
assert_zu_eq(get_arena_pdirty(arena_ind), 0,
"Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
"Unexpected muzzy pages");
}
do_arena_destroy(arena_ind);
}
TEST_END
TEST_BEGIN(test_decay_never) {
unsigned arena_ind = do_arena_create(-1);
unsigned arena_ind = do_arena_create(-1, -1);
int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
void *ptrs[sizeof(sizes)/sizeof(size_t)];
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
@ -503,12 +561,16 @@ TEST_BEGIN(test_decay_never) {
}
/* Verify that each deallocation generates additional dirty pages. */
size_t pdirty_prev = get_arena_pdirty(arena_ind);
size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
dallocx(ptrs[i], flags);
size_t pdirty = get_arena_pdirty(arena_ind);
size_t pmuzzy = get_arena_pmuzzy(arena_ind);
assert_zu_gt(pdirty, pdirty_prev,
"Expected dirty pages to increase.");
assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
pdirty_prev = pdirty;
}
do_arena_destroy(arena_ind);


@ -1,6 +1,6 @@
#!/bin/sh
export MALLOC_CONF="decay_time:1"
export MALLOC_CONF="dirty_decay_time:1,muzzy_decay_time:1"
if [ "x${enable_tcache}" = "x1" ] ; then
export MALLOC_CONF="${MALLOC_CONF},lg_tcache_max:0"
fi


@ -161,7 +161,8 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(unsigned, narenas, always);
TEST_MALLCTL_OPT(const char *, percpu_arena, always);
TEST_MALLCTL_OPT(ssize_t, decay_time, always);
TEST_MALLCTL_OPT(ssize_t, dirty_decay_time, always);
TEST_MALLCTL_OPT(ssize_t, muzzy_decay_time, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
TEST_MALLCTL_OPT(const char *, junk, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
@ -401,32 +402,68 @@ TEST_BEGIN(test_arena_i_initialized) {
}
TEST_END
TEST_BEGIN(test_arena_i_decay_time) {
ssize_t decay_time, orig_decay_time, prev_decay_time;
TEST_BEGIN(test_arena_i_dirty_decay_time) {
ssize_t dirty_decay_time, orig_dirty_decay_time, prev_dirty_decay_time;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
decay_time = -2;
assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
(void *)&decay_time, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
decay_time = 0x7fffffff;
assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
(void *)&decay_time, sizeof(ssize_t)), 0,
assert_d_eq(mallctl("arena.0.dirty_decay_time",
(void *)&orig_dirty_decay_time, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
for (prev_decay_time = decay_time, decay_time = -1;
decay_time < 20; prev_decay_time = decay_time, decay_time++) {
ssize_t old_decay_time;
dirty_decay_time = -2;
assert_d_eq(mallctl("arena.0.dirty_decay_time", NULL, NULL,
(void *)&dirty_decay_time, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time,
&sz, (void *)&decay_time, sizeof(ssize_t)), 0,
dirty_decay_time = 0x7fffffff;
assert_d_eq(mallctl("arena.0.dirty_decay_time", NULL, NULL,
(void *)&dirty_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
for (prev_dirty_decay_time = dirty_decay_time, dirty_decay_time = -1;
dirty_decay_time < 20; prev_dirty_decay_time = dirty_decay_time,
dirty_decay_time++) {
ssize_t old_dirty_decay_time;
assert_d_eq(mallctl("arena.0.dirty_decay_time",
(void *)&old_dirty_decay_time, &sz,
(void *)&dirty_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_decay_time, prev_decay_time,
"Unexpected old arena.0.decay_time");
assert_zd_eq(old_dirty_decay_time, prev_dirty_decay_time,
"Unexpected old arena.0.dirty_decay_time");
}
}
TEST_END
TEST_BEGIN(test_arena_i_muzzy_decay_time) {
ssize_t muzzy_decay_time, orig_muzzy_decay_time, prev_muzzy_decay_time;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arena.0.muzzy_decay_time",
(void *)&orig_muzzy_decay_time, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_time = -2;
assert_d_eq(mallctl("arena.0.muzzy_decay_time", NULL, NULL,
(void *)&muzzy_decay_time, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_time = 0x7fffffff;
assert_d_eq(mallctl("arena.0.muzzy_decay_time", NULL, NULL,
(void *)&muzzy_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
for (prev_muzzy_decay_time = muzzy_decay_time, muzzy_decay_time = -1;
muzzy_decay_time < 20; prev_muzzy_decay_time = muzzy_decay_time,
muzzy_decay_time++) {
ssize_t old_muzzy_decay_time;
assert_d_eq(mallctl("arena.0.muzzy_decay_time",
(void *)&old_muzzy_decay_time, &sz,
(void *)&muzzy_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_muzzy_decay_time, prev_muzzy_decay_time,
"Unexpected old arena.0.muzzy_decay_time");
}
}
TEST_END
@ -522,32 +559,68 @@ TEST_BEGIN(test_arena_i_dss) {
}
TEST_END
TEST_BEGIN(test_arenas_decay_time) {
ssize_t decay_time, orig_decay_time, prev_decay_time;
TEST_BEGIN(test_arenas_dirty_decay_time) {
ssize_t dirty_decay_time, orig_dirty_decay_time, prev_dirty_decay_time;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
assert_d_eq(mallctl("arenas.dirty_decay_time",
(void *)&orig_dirty_decay_time, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
decay_time = -2;
assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
(void *)&decay_time, sizeof(ssize_t)), EFAULT,
dirty_decay_time = -2;
assert_d_eq(mallctl("arenas.dirty_decay_time", NULL, NULL,
(void *)&dirty_decay_time, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
decay_time = 0x7fffffff;
assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
(void *)&decay_time, sizeof(ssize_t)), 0,
dirty_decay_time = 0x7fffffff;
assert_d_eq(mallctl("arenas.dirty_decay_time", NULL, NULL,
(void *)&dirty_decay_time, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
for (prev_decay_time = decay_time, decay_time = -1;
decay_time < 20; prev_decay_time = decay_time, decay_time++) {
ssize_t old_decay_time;
for (prev_dirty_decay_time = dirty_decay_time, dirty_decay_time = -1;
dirty_decay_time < 20; prev_dirty_decay_time = dirty_decay_time,
dirty_decay_time++) {
ssize_t old_dirty_decay_time;
assert_d_eq(mallctl("arenas.decay_time",
(void *)&old_decay_time, &sz, (void *)&decay_time,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
assert_zd_eq(old_decay_time, prev_decay_time,
"Unexpected old arenas.decay_time");
assert_d_eq(mallctl("arenas.dirty_decay_time",
(void *)&old_dirty_decay_time, &sz,
(void *)&dirty_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_dirty_decay_time, prev_dirty_decay_time,
"Unexpected old arenas.dirty_decay_time");
}
}
TEST_END
TEST_BEGIN(test_arenas_muzzy_decay_time) {
ssize_t muzzy_decay_time, orig_muzzy_decay_time, prev_muzzy_decay_time;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arenas.muzzy_decay_time",
(void *)&orig_muzzy_decay_time, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_time = -2;
assert_d_eq(mallctl("arenas.muzzy_decay_time", NULL, NULL,
(void *)&muzzy_decay_time, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_time = 0x7fffffff;
assert_d_eq(mallctl("arenas.muzzy_decay_time", NULL, NULL,
(void *)&muzzy_decay_time, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
for (prev_muzzy_decay_time = muzzy_decay_time, muzzy_decay_time = -1;
muzzy_decay_time < 20; prev_muzzy_decay_time = muzzy_decay_time,
muzzy_decay_time++) {
ssize_t old_muzzy_decay_time;
assert_d_eq(mallctl("arenas.muzzy_decay_time",
(void *)&old_muzzy_decay_time, &sz,
(void *)&muzzy_decay_time, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_muzzy_decay_time, prev_muzzy_decay_time,
"Unexpected old arenas.muzzy_decay_time");
}
}
TEST_END
@ -630,7 +703,8 @@ TEST_BEGIN(test_stats_arenas) {
TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(const char *, dss);
TEST_STATS_ARENAS(ssize_t, decay_time);
TEST_STATS_ARENAS(ssize_t, dirty_decay_time);
TEST_STATS_ARENAS(ssize_t, muzzy_decay_time);
TEST_STATS_ARENAS(size_t, pactive);
TEST_STATS_ARENAS(size_t, pdirty);
@ -653,11 +727,13 @@ main(void) {
test_tcache,
test_thread_arena,
test_arena_i_initialized,
test_arena_i_decay_time,
test_arena_i_dirty_decay_time,
test_arena_i_muzzy_decay_time,
test_arena_i_purge,
test_arena_i_decay,
test_arena_i_dss,
test_arenas_decay_time,
test_arenas_dirty_decay_time,
test_arenas_muzzy_decay_time,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lextent_constants,


@ -1,4 +1,4 @@
#!/bin/sh
# Immediately purge to minimize fragmentation.
export MALLOC_CONF="decay_time:-1"
export MALLOC_CONF="dirty_decay_time:0,muzzy_decay_time:0"


@ -71,7 +71,8 @@ TEST_BEGIN(test_stats_arenas_summary) {
size_t sz;
int expected = config_stats ? 0 : ENOENT;
size_t mapped;
uint64_t npurge, nmadvise, purged;
uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
assert_ptr_not_null(little, "Unexpected mallocx() failure");
@ -92,19 +93,34 @@ TEST_BEGIN(test_stats_arenas_summary) {
sz = sizeof(size_t);
assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.dirty_npurge",
(void *)&dirty_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
(void *)&dirty_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.dirty_purged",
(void *)&dirty_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
(void *)&muzzy_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
(void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.muzzy_purged",
(void *)&muzzy_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
if (config_stats) {
assert_u64_gt(npurge, 0,
assert_u64_gt(dirty_npurge + muzzy_npurge, 0,
"At least one purge should have occurred");
assert_u64_le(nmadvise, purged,
"nmadvise should be no greater than purged");
assert_u64_le(dirty_nmadvise, dirty_purged,
"dirty_nmadvise should be no greater than dirty_purged");
assert_u64_le(muzzy_nmadvise, muzzy_purged,
"muzzy_nmadvise should be no greater than muzzy_purged");
}
}
TEST_END