Implement decay-based unused dirty page purging.

This is an alternative to the existing ratio-based unused dirty page
purging, and is intended to eventually become the sole purging
mechanism.

Add mallctls:
- opt.purge
- opt.decay_time
- arena.<i>.decay
- arena.<i>.decay_time
- arenas.decay_time
- stats.arenas.<i>.decay_time

This resolves #325.
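
As a usage sketch (editor's addition, not part of the commit): the new
controls are driven through the standard mallctl() interface. Names and
types follow the documentation changes below; error handling is mostly
elided.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *purge;
	size_t sz = sizeof(purge);
	ssize_t decay_time = 30;
	unsigned narenas;
	char name[64];

	/* Read the purge mode selected at startup ("ratio" or "decay"). */
	if (mallctl("opt.purge", &purge, &sz, NULL, 0) == 0)
		printf("purge mode: %s\n", purge);

	/* Dynamically change arena 0's decay time (decay mode only). */
	mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
	    sizeof(decay_time));

	/* Trigger decay-based purging for all arenas at once. */
	sz = sizeof(narenas);
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
		snprintf(name, sizeof(name), "arena.%u.decay", narenas);
		mallctl(name, NULL, NULL, NULL, 0);
	}
	return (0);
}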
Jason Evans, 2016-02-19 20:09:31 -08:00
parent 8e82af1166, commit 243f7a0508
18 changed files with 1268 additions and 112 deletions

Makefile.in

@@ -121,6 +121,7 @@ C_UTIL_INTEGRATION_SRCS := $(srcroot)src/time.c $(srcroot)src/util.c
TESTS_UNIT := $(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \
@@ -354,18 +355,22 @@ stress_dir:
check_dir: check_unit_dir check_integration_dir
check_unit: tests_unit check_unit_dir
$(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
check_integration_prof: tests_integration check_integration_dir
ifeq ($(enable_prof), 1)
$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
endif
check_integration_decay: tests_integration check_integration_dir
$(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
$(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
check_integration: tests_integration check_integration_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
stress: tests_stress stress_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
check: check_unit check_integration check_integration_decay check_integration_prof
ifeq ($(enable_code_coverage), 1)
coverage_unit: check_unit

doc/jemalloc.xml.in

@@ -949,6 +949,20 @@ for (i = 0; i < nbins; i++) {
number of CPUs, or one if there is a single CPU.</para></listitem>
</varlistentry>
<varlistentry id="opt.purge">
<term>
<mallctl>opt.purge</mallctl>
(<type>const char *</type>)
<literal>r-</literal>
</term>
<listitem><para>Purge mode is &ldquo;ratio&rdquo; (default) or
&ldquo;decay&rdquo;. See <link
linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
for details of the ratio mode. See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
details of the decay mode.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_dirty_mult"> <varlistentry id="opt.lg_dirty_mult">
<term> <term>
<mallctl>opt.lg_dirty_mult</mallctl> <mallctl>opt.lg_dirty_mult</mallctl>
@ -971,6 +985,26 @@ for (i = 0; i < nbins; i++) {
for related dynamic control options.</para></listitem> for related dynamic control options.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.decay_time">
<term>
<mallctl>opt.decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused dirty pages until an equivalent set of unused dirty pages is
purged and/or reused. The pages are incrementally purged according to a
sigmoidal decay curve that starts and ends with zero purge rate. A
decay time of 0 causes all unused dirty pages to be purged immediately
upon creation. A decay time of -1 disables purging. The default decay
time is 10 seconds. See <link
linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
and <link
linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
for related dynamic control options.
</para></listitem>
</varlistentry>
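As a configuration sketch (editor's addition): since each opt.* mallctl
corresponds to a malloc_conf option, decay mode with a 30 second decay
time could be requested at startup with, e.g.,

malloc_conf = "purge:decay,decay_time:30";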
<varlistentry id="opt.stats_print"> <varlistentry id="opt.stats_print">
<term> <term>
<mallctl>opt.stats_print</mallctl> <mallctl>opt.stats_print</mallctl>
@ -1501,12 +1535,27 @@ malloc_conf = "xmalloc:true";]]></programlisting>
(<type>void</type>) (<type>void</type>)
<literal>--</literal> <literal>--</literal>
</term> </term>
<listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
all arenas if &lt;i&gt; equals <link all arenas if &lt;i&gt; equals <link
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arena.i.decay">
<term>
<mallctl>arena.&lt;i&gt;.decay</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Trigger decay-based purging of unused dirty pages for
arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <link
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
The proportion of unused dirty pages to be purged depends on the current
time; see <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
details.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.dss"> <varlistentry id="arena.i.dss">
<term> <term>
<mallctl>arena.&lt;i&gt;.dss</mallctl> <mallctl>arena.&lt;i&gt;.dss</mallctl>
@ -1535,6 +1584,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
for additional information.</para></listitem> for additional information.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arena.i.decay_time">
<term>
<mallctl>arena.&lt;i&gt;.decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current per-arena approximate time in seconds from the
creation of a set of unused dirty pages until an equivalent set of
unused dirty pages is purged and/or reused. Each time this interface is
set, all currently unused dirty pages are considered to have fully
decayed, which causes immediate purging of all unused dirty pages unless
the decay time is set to -1 (i.e. purging disabled). See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
additional information.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.chunk_hooks"> <varlistentry id="arena.i.chunk_hooks">
<term> <term>
<mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl> <mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
@ -1769,6 +1834,21 @@ typedef struct {
for additional information.</para></listitem> for additional information.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arenas.decay_time">
<term>
<mallctl>arenas.decay_time</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current default per-arena approximate time in seconds
from the creation of a set of unused dirty pages until an equivalent set
of unused dirty pages is purged and/or reused, used to initialize <link
linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
during arena creation. See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
additional information.</para></listitem>
</varlistentry>
<varlistentry id="arenas.quantum"> <varlistentry id="arenas.quantum">
<term> <term>
<mallctl>arenas.quantum</mallctl> <mallctl>arenas.quantum</mallctl>
@ -2113,6 +2193,19 @@ typedef struct {
for details.</para></listitem> for details.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.decay_time">
<term>
<mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in seconds from the creation of a set
of unused dirty pages until an equivalent set of unused dirty pages is
purged and/or reused. See <link
linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
for details.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.nthreads"> <varlistentry id="stats.arenas.i.nthreads">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl> <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>

include/jemalloc/internal/arena.h

@@ -23,6 +23,18 @@
*/
#define LG_DIRTY_MULT_DEFAULT 3
typedef enum {
purge_mode_ratio = 0,
purge_mode_decay = 1,
purge_mode_limit = 2
} purge_mode_t;
#define PURGE_DEFAULT purge_mode_ratio
/* Default decay time in seconds. */
#define DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
@@ -325,7 +337,7 @@ struct arena_s {
/* Minimum ratio (log base 2) of nactive:ndirty. */
ssize_t lg_dirty_mult;
/* True if a thread is currently executing arena_purge_to_limit(). */
bool purging;
/* Number of pages in active runs and huge regions. */
@@ -376,6 +388,53 @@ struct arena_s {
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
/*
* Approximate time in seconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
ssize_t decay_time;
/* decay_time / SMOOTHSTEP_NSTEPS. */
struct timespec decay_interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
struct timespec decay_epoch;
/* decay_deadline randomness generator. */
uint64_t decay_jitter_state;
/*
* Deadline for current epoch. This is the sum of decay_interval and
* per epoch jitter which is a uniform random variable in
* [0..decay_interval). Epochs always advance by precise multiples of
* decay_interval, but we randomize the deadline to reduce the
* likelihood of arenas purging in lockstep.
*/
struct timespec decay_deadline;
/*
* Number of dirty pages at beginning of current epoch. During epoch
* advancement we use the delta between decay_ndirty and ndirty to
* determine how many dirty pages, if any, were generated, and record
* the result in decay_backlog.
*/
size_t decay_ndirty;
/*
* Memoized result of arena_decay_backlog_npages_limit() corresponding
* to the current contents of decay_backlog, i.e. the limit on how many
* pages are allowed to exist for the decay epochs.
*/
size_t decay_backlog_npages_limit;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to decay_epoch.
*/
size_t decay_backlog[SMOOTHSTEP_NSTEPS];
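/*
 * Editor's illustration (assumed numbers, not part of the commit): with
 * decay_time = 10s and SMOOTHSTEP_NSTEPS = 200, decay_interval is 50ms.
 * If 3.2 intervals elapse between decay ticks, epoch advancement computes
 * nadvance = 3, shifts decay_backlog left by three slots (zeroing the
 * skipped ones), and credits all dirty pages created since the previous
 * epoch to the newest slot.
 */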
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
@@ -408,6 +467,7 @@ struct arena_s {
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
arena_t *arena;
ticker_t decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -423,7 +483,10 @@ static const size_t large_pad =
#endif
;
extern purge_mode_t opt_purge;
extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
extern ssize_t opt_decay_time;
extern arena_bin_info_t arena_bin_info[NBINS];
@@ -451,9 +514,11 @@ bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(arena_t *arena);
bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
ssize_t arena_decay_time_get(arena_t *arena);
bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
void arena_maybe_purge(arena_t *arena);
void arena_purge(arena_t *arena, bool all);
void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
@@ -467,7 +532,7 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache);
@@ -478,8 +543,8 @@ void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
@@ -488,12 +553,13 @@ void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
@@ -501,9 +567,11 @@ dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
void arena_stats_merge(arena_t *arena, const char **dss,
ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
arena_t *arena_new(unsigned ind);
bool arena_boot(void);
@@ -566,6 +634,8 @@ prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void arena_prof_tctx_reset(const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsd_t *tsd, arena_t *arena);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(const void *ptr);
@@ -1165,6 +1235,27 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
{
ticker_t *decay_ticker;
if (unlikely(tsd == NULL))
return;
decay_ticker = decay_ticker_get(tsd, arena->ind);
if (unlikely(decay_ticker == NULL))
return;
if (unlikely(ticker_ticks(decay_ticker, nticks)))
arena_purge(arena, false);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsd_t *tsd, arena_t *arena)
{
arena_decay_ticks(tsd, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path)
@@ -1271,7 +1362,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
tcache_dalloc_small(tsd, tcache, ptr, binind,
slow_path);
} else {
arena_dalloc_small(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
}
} else {
@@ -1286,7 +1377,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
tcache_dalloc_large(tsd, tcache, ptr, size -
large_pad, slow_path);
} else {
arena_dalloc_large(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr);
}
}
@@ -1326,7 +1417,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
}
} else {
@@ -1337,7 +1428,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
tcache_dalloc_large(tsd, tcache, ptr, size,
true);
} else {
arena_dalloc_large(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr);
}
}

include/jemalloc/internal/ctl.h

@@ -35,6 +35,7 @@ struct ctl_arena_stats_s {
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
ssize_t decay_time;
size_t pactive;
size_t pdirty;
arena_stats_t astats;

include/jemalloc/internal/huge.h

@@ -13,8 +13,8 @@ void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache);
bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET

include/jemalloc/internal/jemalloc_internal.h.in

@@ -545,6 +545,7 @@ arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -833,6 +834,17 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
return (tdata->arena);
}
JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL))
return (NULL);
return (&tdata->decay_ticker);
}
#endif
#include "jemalloc/internal/bitmap.h"
@@ -883,8 +895,8 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -1150,8 +1162,8 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
assert(ptr != NULL);
@@ -1163,7 +1175,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
return (true);
}
return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
}
#endif

include/jemalloc/internal/private_symbols.txt

@@ -25,6 +25,12 @@ arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_decay_tick
arena_decay_ticks
arena_dss_prec_get
arena_dss_prec_set
arena_get
@@ -83,7 +89,7 @@ arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
@@ -185,6 +191,7 @@ ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
@@ -318,6 +325,7 @@ narenas_total_get
ncpus
nhbins
opt_abort
opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
@@ -336,6 +344,7 @@ opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_purge
opt_quarantine
opt_redzone
opt_stats_print
@@ -397,6 +406,7 @@ prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work

include/jemalloc/internal/tcache.h

@@ -361,7 +361,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
usize = index2size(binind);
assert(usize <= tcache_maxclass);
ret = arena_malloc_large(tsd, arena, usize, binind, zero);
if (ret == NULL)
return (NULL);
} else {

include/jemalloc/internal/time.h

@@ -26,7 +26,12 @@ void time_imultiply(struct timespec *time, uint64_t multiplier);
void time_idivide(struct timespec *time, uint64_t divisor);
uint64_t time_divide(const struct timespec *time,
const struct timespec *divisor);
#ifdef JEMALLOC_JET
typedef bool (time_update_t)(struct timespec *);
extern time_update_t *time_update;
#else
bool time_update(struct timespec *time);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
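
A testing sketch (editor's addition; an assumed harness in the spirit of
the new test/unit/decay.c): under JEMALLOC_JET the declaration above
becomes a mutable function pointer, so a unit test can substitute a
synthetic clock and step decay epochs deterministically:

static struct timespec mock_time;

static bool
mock_time_update(struct timespec *ts)
{
	*ts = mock_time;	/* Hand out the synthetic time. */
	return (false);		/* false: time did not go backwards. */
}

/* In the test body: */
time_update = mock_time_update;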

src/arena.c

@@ -4,8 +4,17 @@
/******************************************************************************/
/* Data. */
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
"ratio",
"decay",
"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias;
@@ -1205,10 +1214,193 @@ arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
return (false);
}
static void
arena_decay_deadline_init(arena_t *arena)
{
assert(opt_purge == purge_mode_decay);
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
time_copy(&arena->decay_deadline, &arena->decay_epoch);
time_add(&arena->decay_deadline, &arena->decay_interval);
if (arena->decay_time > 0) {
uint64_t decay_interval_ns, r;
struct timespec jitter;
decay_interval_ns = time_sec(&arena->decay_interval) *
1000000000 + time_nsec(&arena->decay_interval);
r = prng_range(&arena->decay_jitter_state, decay_interval_ns);
time_init(&jitter, r / 1000000000, r % 1000000000);
time_add(&arena->decay_deadline, &jitter);
}
}
static bool
arena_decay_deadline_reached(const arena_t *arena, const struct timespec *time)
{
assert(opt_purge == purge_mode_decay);
return (time_compare(&arena->decay_deadline, time) <= 0);
}
static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
assert(opt_purge == purge_mode_decay);
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay_backlog[i] * h_steps[i];
npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
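/*
 * Editor's worked example (assumed values, not part of the commit): taking
 * the generated smoothstep.h to define SMOOTHSTEP_NSTEPS == 200 and
 * SMOOTHSTEP_BFP == 24, h_steps[] rises from ~0 for the oldest epoch to
 * ~2^24 for the newest. A backlog slot holding b pages therefore
 * contributes (b * h) >> 24 pages to the limit: pages from a just-finished
 * epoch are almost fully retained, while pages roughly decay_time old
 * contribute nothing and become eligible for purging.
 */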
static void
arena_decay_epoch_advance(arena_t *arena, const struct timespec *time)
{
uint64_t nadvance;
struct timespec delta;
size_t ndirty_delta;
assert(opt_purge == purge_mode_decay);
assert(arena_decay_deadline_reached(arena, time));
time_copy(&delta, time);
time_subtract(&delta, &arena->decay_epoch);
nadvance = time_divide(&delta, &arena->decay_interval);
assert(nadvance > 0);
/* Add nadvance decay intervals to epoch. */
time_copy(&delta, &arena->decay_interval);
time_imultiply(&delta, nadvance);
time_add(&arena->decay_epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
if (nadvance >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
(SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
if (nadvance > 1) {
memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
nadvance], 0, (nadvance-1) * sizeof(size_t));
}
}
ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
arena->decay_ndirty : 0;
arena->decay_ndirty = arena->ndirty;
arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
arena->decay_backlog_npages_limit =
arena_decay_backlog_npages_limit(arena);
}
static size_t
arena_decay_npages_limit(arena_t *arena)
{
size_t npages_limit;
assert(opt_purge == purge_mode_decay);
npages_limit = arena->decay_backlog_npages_limit;
/* Add in any dirty pages created during the current epoch. */
if (arena->ndirty > arena->decay_ndirty)
npages_limit += arena->ndirty - arena->decay_ndirty;
return (npages_limit);
}
static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{
arena->decay_time = decay_time;
if (decay_time > 0) {
time_init(&arena->decay_interval, decay_time, 0);
time_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
}
time_init(&arena->decay_epoch, 0, 0);
time_update(&arena->decay_epoch);
arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay_ndirty = arena->ndirty;
arena->decay_backlog_npages_limit = 0;
memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
arena_decay_time_valid(ssize_t decay_time)
{
return (decay_time >= -1 && decay_time <= TIME_SEC_MAX);
}
ssize_t
arena_decay_time_get(arena_t *arena)
{
ssize_t decay_time;
malloc_mutex_lock(&arena->lock);
decay_time = arena->decay_time;
malloc_mutex_unlock(&arena->lock);
return (decay_time);
}
bool
arena_decay_time_set(arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
malloc_mutex_lock(&arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_time changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
arena_maybe_purge(arena);
malloc_mutex_unlock(&arena->lock);
return (false);
}
static void
arena_maybe_purge_ratio(arena_t *arena)
{
assert(opt_purge == purge_mode_ratio);
/* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0)
return;
@@ -1231,6 +1423,41 @@ arena_maybe_purge_ratio(arena_t *arena)
}
}
static void
arena_maybe_purge_decay(arena_t *arena)
{
struct timespec time;
size_t ndirty_limit;
assert(opt_purge == purge_mode_decay);
/* Purge all or nothing if the option is disabled. */
if (arena->decay_time <= 0) {
if (arena->decay_time == 0)
arena_purge_to_limit(arena, 0);
return;
}
time_copy(&time, &arena->decay_epoch);
if (unlikely(time_update(&time))) {
/* Time went backwards. Force an epoch advance. */
time_copy(&time, &arena->decay_deadline);
}
if (arena_decay_deadline_reached(arena, &time))
arena_decay_epoch_advance(arena, &time);
ndirty_limit = arena_decay_npages_limit(arena);
/*
* Don't try to purge unless the number of purgeable pages exceeds the
* current limit.
*/
if (arena->ndirty <= ndirty_limit)
return;
arena_purge_to_limit(arena, ndirty_limit);
}
void
arena_maybe_purge(arena_t *arena)
{
@@ -1239,7 +1466,10 @@ arena_maybe_purge(arena_t *arena)
if (arena->purging)
return;
if (opt_purge == purge_mode_ratio)
arena_maybe_purge_ratio(arena);
else
arena_maybe_purge_decay(arena);
}
static size_t
@@ -1298,6 +1528,9 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
chunkselm_next = qr_next(chunkselm, cc_link);
/*
@@ -1327,6 +1560,9 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1352,7 +1588,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
nstashed += npages;
if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
ndirty_limit)
break;
}
@@ -1492,6 +1729,15 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
}
/*
* NB: ndirty_limit is interpreted differently depending on opt_purge:
* - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
* desired state:
* (arena->ndirty <= ndirty_limit)
* - purge_mode_decay: Purge as many dirty runs/chunks as possible without
* violating the invariant:
* (arena->ndirty >= ndirty_limit)
*/
static void
arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
{
@@ -1510,8 +1756,8 @@ arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
assert(opt_purge != purge_mode_ratio || (arena->nactive >>
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
@@ -1534,11 +1780,14 @@ label_return:
}
void
arena_purge(arena_t *arena, bool all)
{
malloc_mutex_lock(&arena->lock);
if (all)
arena_purge_to_limit(arena, 0);
else
arena_maybe_purge(arena);
malloc_mutex_unlock(&arena->lock);
}
@@ -1960,8 +2209,8 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
@@ -2008,6 +2257,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
}
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
arena_decay_tick(tsd, arena);
}
void
@@ -2118,7 +2368,8 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
}
static void *
arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
bool zero)
{
void *ret;
arena_bin_t *bin;
@@ -2166,11 +2417,13 @@ arena_malloc_small(arena_t *arena, size_t size, szind_t binind, bool zero)
memset(ret, 0, size);
}
arena_decay_tick(tsd, arena);
return (ret);
}
void *
arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
bool zero)
{
void *ret;
size_t usize;
@@ -2227,6 +2480,7 @@ arena_malloc_large(arena_t *arena, size_t size, szind_t binind, bool zero)
}
}
arena_decay_tick(tsd, arena);
return (ret);
}
@@ -2240,9 +2494,9 @@ arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
return (NULL);
if (likely(size <= SMALL_MAXCLASS))
return (arena_malloc_small(tsd, arena, size, ind, zero));
if (likely(size <= large_maxclass))
return (arena_malloc_large(tsd, arena, size, ind, zero));
return (huge_malloc(tsd, arena, size, zero, tcache));
}
@@ -2329,6 +2583,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
arena_decay_tick(tsd, arena);
return (ret);
}
@@ -2515,7 +2770,7 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
void
arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
@@ -2527,6 +2782,7 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
bitselm = arena_bitselm_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
arena_decay_tick(tsd, arena);
}
#ifdef JEMALLOC_JET
@@ -2583,12 +2839,13 @@ arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
}
void
arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
malloc_mutex_lock(&arena->lock);
arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
malloc_mutex_unlock(&arena->lock);
arena_decay_tick(tsd, arena);
}
static void
@@ -2789,14 +3046,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
}
bool
arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
size_t usize_min, usize_max;
usize_min = s2u(size);
usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
arena_chunk_t *chunk;
/*
* Avoid moving the allocation if the size class can be left the
* same.
@@ -2816,10 +3075,12 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
return (false);
} else {
return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
usize_max, zero));
}
}
@@ -2852,7 +3113,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
return (ptr);
/*
@@ -2915,15 +3176,36 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{
if (opt_purge != purge_mode_ratio)
return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false);
}
ssize_t
arena_decay_time_default_get(void)
{
return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}
bool
arena_decay_time_default_set(ssize_t decay_time)
{
if (opt_purge != purge_mode_decay)
return (true);
if (!arena_decay_time_valid(decay_time))
return (true);
atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
}
void
arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats)
{
@@ -2932,6 +3214,7 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay_time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
@@ -3050,6 +3333,9 @@ arena_new(unsigned ind)
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge);
if (malloc_mutex_init(&arena->huge_mtx))
return (NULL);
@@ -3227,6 +3513,7 @@ arena_boot(void)
unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
/*
* Compute the header size such that it is large enough to contain the

src/ctl.c

@@ -92,7 +92,9 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
@@ -115,10 +117,12 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
static void arena_i_purge(unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
@@ -132,6 +136,7 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
@@ -182,6 +187,7 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
@@ -260,7 +266,9 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
@@ -290,8 +298,10 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
@@ -341,6 +351,7 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
{NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
@@ -441,6 +452,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
@@ -523,6 +535,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1;
astats->decay_time = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -545,8 +558,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
unsigned i;
arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
&cstats->decay_time, &cstats->pactive, &cstats->pdirty,
&cstats->astats, cstats->bstats, cstats->lstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
@@ -1265,7 +1278,9 @@ CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
@@ -1539,34 +1554,52 @@ label_return:
/******************************************************************************/
static void
arena_i_purge(unsigned arena_ind, bool all)
{
malloc_mutex_lock(&ctl_mtx);
{
tsd_t *tsd = tsd_fetch();
unsigned narenas = ctl_stats.narenas;
if (arena_ind == narenas) {
unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0, refreshed = false; i < narenas; i++) {
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false,
true);
refreshed = true;
}
}
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(&ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge(tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsd, arena_ind, false, true);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(&ctl_mtx);
if (tarena != NULL)
arena_purge(tarena, all);
}
}
}
@@ -1578,9 +1611,22 @@ arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
READONLY();
WRITEONLY();
arena_i_purge(mib[1], true);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_decay_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
arena_i_purge(mib[1], false);
ret = 0;
label_return:
@@ -1677,6 +1723,40 @@ label_return:
return (ret);
}
static int
arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = mib[1];
arena_t *arena;
arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_set(arena, *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
static int
arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
@@ -1801,6 +1881,32 @@ label_return:
return (ret);
}
static int
arenas_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_default_get();
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -2002,6 +2108,8 @@ CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t)
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)

src/huge.c
@@ -99,6 +99,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
 	} else if (config_fill && unlikely(opt_junk_alloc))
 		memset(ret, 0xa5, size);
 
+	arena_decay_tick(tsd, arena);
 	return (ret);
 }

@@ -280,7 +281,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
 }
 
 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
     size_t usize_max, bool zero)
 {

@@ -292,14 +293,19 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
 	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
 		/* Attempt to expand the allocation in-place. */
-		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+		    zero)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
 			return (false);
+		}
 		/* Try again, this time with usize_min. */
 		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
 		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
-		    oldsize, usize_min, zero))
+		    oldsize, usize_min, zero)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
 			return (false);
+		}
 	}
 
 	/*
 	 * Avoid moving the allocation if the existing chunk size accommodates

@@ -309,12 +315,17 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
 	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
 		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
 		    zero);
+		arena_decay_tick(tsd, huge_aalloc(ptr));
 		return (false);
 	}
 
 	/* Attempt to shrink the allocation in-place. */
-	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
-		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
+			arena_decay_tick(tsd, huge_aalloc(ptr));
+			return (false);
+		}
+	}
 	return (true);
 }

@@ -336,7 +347,7 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
 	size_t copysize;
 
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
+	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
 		return (ptr);
 
 	/*

@@ -373,6 +384,8 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 	arena_chunk_dalloc_huge(extent_node_arena_get(node),
 	    extent_node_addr_get(node), extent_node_size_get(node));
 	idalloctm(tsd, node, tcache, true, true);
+
+	arena_decay_tick(tsd, arena);
 }
 
 arena_t *
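The arena_decay_tick() calls threaded through these paths are what make decay-based purging self-driving: every allocation or deallocation event advances a per-thread, per-arena ticker, and once enough events accumulate the arena reassesses its decay deadline. The inline definitions live in headers outside this excerpt; the following is a sketch of the assumed shape, using names from this diff, not the verbatim source:

/* Sketch only; the real inlines are in arena.h (not shown in this diff). */
JEMALLOC_INLINE void
arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
{
	ticker_t *decay_ticker;

	if (unlikely(tsd == NULL))
		return;
	decay_ticker = decay_ticker_get(tsd, arena->ind);
	if (unlikely(decay_ticker == NULL))
		return;
	/* ticker_ticks() returns true once per DECAY_NTICKS_PER_UPDATE events. */
	if (unlikely(ticker_ticks(decay_ticker, nticks)))
		arena_purge(arena, false);
}

JEMALLOC_INLINE void
arena_decay_tick(tsd_t *tsd, arena_t *arena)
{
	arena_decay_ticks(tsd, arena, 1);
}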

src/jemalloc.c

@@ -577,6 +577,17 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
 		    * (narenas_tdata - narenas_actual));
 	}
 
+	/* Copy/initialize tickers. */
+	for (i = 0; i < narenas_actual; i++) {
+		if (i < narenas_tdata_old) {
+			ticker_copy(&arenas_tdata[i].decay_ticker,
+			    &arenas_tdata_old[i].decay_ticker);
+		} else {
+			ticker_init(&arenas_tdata[i].decay_ticker,
+			    DECAY_NTICKS_PER_UPDATE);
+		}
+	}
+
 	/* Read the refreshed tdata array. */
 	tdata = &arenas_tdata[ind];
 label_return:

@@ -1120,8 +1131,27 @@ malloc_conf_init(void)
 			}
 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
 			    SIZE_T_MAX, false)
+			if (strncmp("purge", k, klen) == 0) {
+				int i;
+				bool match = false;
+				for (i = 0; i < purge_mode_limit; i++) {
+					if (strncmp(purge_mode_names[i], v,
+					    vlen) == 0) {
+						opt_purge = (purge_mode_t)i;
+						match = true;
+						break;
+					}
+				}
+				if (!match) {
+					malloc_conf_error("Invalid conf value",
+					    k, klen, v, vlen);
+				}
+				continue;
+			}
 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
 			    -1, (sizeof(size_t) << 3) - 1)
+			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
+			    TIME_SEC_MAX);
 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
 			if (config_fill) {
 				if (CONF_MATCH("junk")) {
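With the option parsing above in place, the purge mode and decay time can be selected like any other conf option, either through the MALLOC_CONF environment variable (as the Makefile's new test targets do) or through the compile-time malloc_conf symbol, as in this sketch:

/* Compile-time defaults: decay-based purging with a 10 s decay time.
 * The same string can instead be supplied via MALLOC_CONF at run time. */
const char *malloc_conf = "purge:decay,decay_time:10";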
@@ -2344,12 +2374,12 @@ label_oom:
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero)
+ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero)
 {
 	size_t usize;
 
-	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
+	if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
 		return (old_usize);
 	usize = isalloc(ptr, config_prof);

@@ -2357,14 +2387,15 @@ ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
 }
 
 static size_t
-ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero, prof_tctx_t *tctx)
+ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
 {
 	size_t usize;
 
 	if (tctx == NULL)
 		return (old_usize);
-	usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
+	usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
+	    zero);
 
 	return (usize);
 }

@@ -2390,11 +2421,11 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
 	assert(usize_max != 0);
 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
-		    alignment, zero, tctx);
+		usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
+		    alignment, zero, tctx);
 	} else {
-		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-		    zero);
+		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+		    alignment, zero);
 	}
 	if (usize == old_usize) {
 		prof_alloc_rollback(tsd, tctx, false);

@@ -2441,8 +2472,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
 		    alignment, zero);
 	} else {
-		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
-		    zero);
+		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
+		    alignment, zero);
 	}
 	if (unlikely(usize == old_usize))
 		goto label_not_resized;

src/stats.c

@@ -258,7 +258,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 {
 	unsigned nthreads;
 	const char *dss;
-	ssize_t lg_dirty_mult;
+	ssize_t lg_dirty_mult, decay_time;
 	size_t page, pactive, pdirty, mapped;
 	size_t metadata_mapped, metadata_allocated;
 	uint64_t npurge, nmadvise, purged;

@@ -278,6 +278,7 @@ / @@ -286,15 +287,23 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
 	    dss);
 	CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
-	if (lg_dirty_mult >= 0) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "min active:dirty page ratio: %u:1\n",
-		    (1U << lg_dirty_mult));
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "min active:dirty page ratio: N/A\n");
-	}
+	if (opt_purge == purge_mode_ratio) {
+		if (lg_dirty_mult >= 0) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "min active:dirty page ratio: %u:1\n",
+			    (1U << lg_dirty_mult));
+		} else {
+			malloc_cprintf(write_cb, cbopaque,
+			    "min active:dirty page ratio: N/A\n");
+		}
+	}
+	CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
+	if (opt_purge == purge_mode_decay) {
+		if (decay_time >= 0) {
+			malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
+			    decay_time);
+		} else
+			malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
+	}
 	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
 	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
 	CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
 	CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
 	CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
 	malloc_cprintf(write_cb, cbopaque,
-	    "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
-	    " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
-	    1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
+	    "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64", "
+	    "purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
 
 	malloc_cprintf(write_cb, cbopaque,
 	    "                            allocated      nmalloc      ndalloc"
@@ -486,7 +495,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	OPT_WRITE_SIZE_T(lg_chunk)
 	OPT_WRITE_CHAR_P(dss)
 	OPT_WRITE_SIZE_T(narenas)
-	OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
+	OPT_WRITE_CHAR_P(purge)
+	if (opt_purge == purge_mode_ratio) {
+		OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
+		    arenas.lg_dirty_mult)
+	}
+	if (opt_purge == purge_mode_decay)
+		OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)
 	OPT_WRITE_BOOL(stats_print)
 	OPT_WRITE_CHAR_P(junk)
 	OPT_WRITE_SIZE_T(quarantine)

@@ -531,13 +546,22 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
 
 		CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
-		if (ssv >= 0) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "Min active:dirty page ratio per arena: %u:1\n",
-			    (1U << ssv));
-		} else {
-			malloc_cprintf(write_cb, cbopaque,
-			    "Min active:dirty page ratio per arena: N/A\n");
+		if (opt_purge == purge_mode_ratio) {
+			if (ssv >= 0) {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Min active:dirty page ratio per arena: "
+				    "%u:1\n", (1U << ssv));
+			} else {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Min active:dirty page ratio per arena: "
+				    "N/A\n");
+			}
+		}
+		CTL_GET("arenas.decay_time", &ssv, ssize_t);
+		if (opt_purge == purge_mode_decay) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "Unused dirty page decay time: %zd%s\n",
+			    ssv, (ssv < 0) ? " (no decay)" : "");
 		}
 		if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
 			malloc_cprintf(write_cb, cbopaque,

src/tcache.c

@@ -75,7 +75,7 @@ tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
 {
 	void *ret;
 
-	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+	arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
 	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;

@@ -143,6 +143,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			}
 		}
 		malloc_mutex_unlock(&bin->lock);
+		arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*

@@ -226,6 +227,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		malloc_mutex_unlock(&locked_arena->lock);
 		if (config_prof && idump)
 			prof_idump();
+		arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*

src/time.c

@@ -147,6 +147,10 @@ time_divide(const struct timespec *time, const struct timespec *divisor)
 	return (t / d);
 }
 
+#ifdef JEMALLOC_JET
+#undef time_update
+#define time_update JEMALLOC_N(time_update_impl)
+#endif
 bool
 time_update(struct timespec *time)
 {

@@ -184,3 +188,8 @@ time_update(struct timespec *time)
 	assert(time_valid(time));
 	return (false);
 }
+#ifdef JEMALLOC_JET
+#undef time_update
+#define time_update JEMALLOC_N(time_update)
+time_update_t *time_update = JEMALLOC_N(time_update_impl);
+#endif
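Under JEMALLOC_JET the real implementation is renamed time_update_impl and time_update becomes a writable function pointer, so tests can interpose a deterministic clock. A hypothetical mock in the same vein as the one test/unit/decay.c installs below:

/* Hypothetical JET-build mock; time_update is a time_update_t * here. */
static bool
frozen_time_update(struct timespec *time)
{
	/* Leave *time untouched: the clock appears to stand still. */
	return (false);	/* false means the update was monotonic. */
}

static void
freeze_clock(void)
{
	time_update = frozen_time_update;	/* restore the original when done */
}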

test/unit/decay.c (new file, 370 lines)

@@ -0,0 +1,370 @@
#include "test/jemalloc_test.h"
const char *malloc_conf = "purge:decay,decay_time:1";
static time_update_t *time_update_orig;
static unsigned nupdates_mock;
static struct timespec time_mock;
static bool nonmonotonic_mock;
static bool
time_update_mock(struct timespec *time)
{
nupdates_mock++;
if (!nonmonotonic_mock)
time_copy(time, &time_mock);
return (nonmonotonic_mock);
}
TEST_BEGIN(test_decay_ticks)
{
ticker_t *decay_ticker;
unsigned tick0, tick1;
size_t sz, huge0, large0;
void *p;
unsigned tcache_ind;
test_skip_if(opt_purge != purge_mode_decay);
decay_ticker = decay_ticker_get(tsd_fetch(), 0);
assert_ptr_not_null(decay_ticker,
"Unexpected failure getting decay ticker");
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
/* malloc(). */
tick0 = ticker_read(decay_ticker);
p = malloc(huge0);
assert_ptr_not_null(p, "Unexpected malloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
/* free(). */
tick0 = ticker_read(decay_ticker);
free(p);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
/* calloc(). */
tick0 = ticker_read(decay_ticker);
p = calloc(1, huge0);
assert_ptr_not_null(p, "Unexpected calloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
free(p);
/* posix_memalign(). */
tick0 = ticker_read(decay_ticker);
assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
"Unexpected posix_memalign() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during posix_memalign()");
free(p);
/* aligned_alloc(). */
tick0 = ticker_read(decay_ticker);
p = aligned_alloc(sizeof(size_t), huge0);
assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during aligned_alloc()");
free(p);
/* realloc(). */
/* Allocate. */
tick0 = ticker_read(decay_ticker);
p = realloc(NULL, huge0);
assert_ptr_not_null(p, "Unexpected realloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/* Reallocate. */
tick0 = ticker_read(decay_ticker);
p = realloc(p, huge0);
assert_ptr_not_null(p, "Unexpected realloc() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/* Deallocate. */
tick0 = ticker_read(decay_ticker);
realloc(p, 0);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/* Huge mallocx(). */
tick0 = ticker_read(decay_ticker);
p = mallocx(huge0, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during huge mallocx()");
/* Huge rallocx(). */
tick0 = ticker_read(decay_ticker);
p = rallocx(p, huge0, 0);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during huge rallocx()");
/* Huge xallocx(). */
tick0 = ticker_read(decay_ticker);
xallocx(p, huge0, 0, 0);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during huge xallocx()");
/* Huge dallocx(). */
tick0 = ticker_read(decay_ticker);
dallocx(p, 0);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during huge dallocx()");
/* Huge sdallocx(). */
p = mallocx(huge0, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick0 = ticker_read(decay_ticker);
sdallocx(p, huge0, 0);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during huge sdallocx()");
/* Large mallocx(). */
tick0 = ticker_read(decay_ticker);
p = mallocx(large0, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during large mallocx()");
/* Large rallocx(). */
tick0 = ticker_read(decay_ticker);
p = rallocx(p, large0, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during large rallocx()");
/* Large xallocx(). */
tick0 = ticker_read(decay_ticker);
xallocx(p, large0, 0, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during large xallocx()");
/* Large dallocx(). */
tick0 = ticker_read(decay_ticker);
dallocx(p, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during large dallocx()");
/* Large sdallocx(). */
p = mallocx(large0, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick0 = ticker_read(decay_ticker);
sdallocx(p, large0, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during large sdallocx()");
/* Small mallocx(). */
tick0 = ticker_read(decay_ticker);
p = mallocx(1, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during small mallocx()");
/* Small rallocx(). */
tick0 = ticker_read(decay_ticker);
p = rallocx(p, 1, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected rallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during small rallocx()");
/* Small xallocx(). */
tick0 = ticker_read(decay_ticker);
xallocx(p, 1, 0, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during small xallocx()");
/* Small dallocx(). */
tick0 = ticker_read(decay_ticker);
dallocx(p, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during small dallocx()");
/* Small sdallocx(). */
p = mallocx(1, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick0 = ticker_read(decay_ticker);
sdallocx(p, 1, MALLOCX_TCACHE_NONE);
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during small sdallocx()");
/* tcache fill. */
sz = sizeof(unsigned);
assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
tick0 = ticker_read(decay_ticker);
p = mallocx(1, MALLOCX_TCACHE(tcache_ind));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during tcache fill");
/* tcache flush. */
dallocx(p, MALLOCX_TCACHE(tcache_ind));
tick0 = ticker_read(decay_ticker);
assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tcache_ind,
sizeof(unsigned)), 0, "Unexpected mallctl failure");
tick1 = ticker_read(decay_ticker);
assert_u32_ne(tick1, tick0,
"Expected ticker to tick during tcache flush");
}
TEST_END
TEST_BEGIN(test_decay_ticker)
{
#define NPS 1024
int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
void *ps[NPS];
uint64_t epoch, npurge0, npurge1;
size_t sz, tcache_max, large;
unsigned i, nupdates0;
struct timespec time, decay_time, deadline;
test_skip_if(opt_purge != purge_mode_decay);
/*
* Allocate a bunch of large objects, pause the clock, deallocate the
* objects, restore the clock, then [md]allocx() in a tight loop to
* verify the ticker triggers purging.
*/
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
large = nallocx(tcache_max + 1, flags);
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
"Unexpected mallctl failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
for (i = 0; i < NPS; i++) {
ps[i] = mallocx(large, flags);
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
}
nupdates_mock = 0;
time_init(&time_mock, 0, 0);
time_update(&time_mock);
nonmonotonic_mock = false;
time_update_orig = time_update;
time_update = time_update_mock;
for (i = 0; i < NPS; i++) {
dallocx(ps[i], flags);
nupdates0 = nupdates_mock;
assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.decay failure");
assert_u_gt(nupdates_mock, nupdates0,
"Expected time_update() to be called");
}
time_update = time_update_orig;
time_init(&time, 0, 0);
time_update(&time);
time_init(&decay_time, opt_decay_time, 0);
time_copy(&deadline, &time);
time_add(&deadline, &decay_time);
do {
for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
void *p = mallocx(1, flags);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, flags);
}
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
sizeof(uint64_t)), 0, "Unexpected mallctl failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
time_update(&time);
} while (time_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
#undef NPS
}
TEST_END
TEST_BEGIN(test_decay_nonmonotonic)
{
#define NPS (SMOOTHSTEP_NSTEPS + 1)
int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
void *ps[NPS];
uint64_t epoch, npurge0, npurge1;
size_t sz, large0;
unsigned i, nupdates0;
test_skip_if(opt_purge != purge_mode_decay);
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
"Unexpected mallctl failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
nupdates_mock = 0;
time_init(&time_mock, 0, 0);
time_update(&time_mock);
nonmonotonic_mock = true;
time_update_orig = time_update;
time_update = time_update_mock;
for (i = 0; i < NPS; i++) {
ps[i] = mallocx(large0, flags);
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NPS; i++) {
dallocx(ps[i], flags);
nupdates0 = nupdates_mock;
assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.decay failure");
assert_u_gt(nupdates_mock, nupdates0,
"Expected time_update() to be called");
}
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
"Unexpected mallctl failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
time_update = time_update_orig;
#undef NPS
}
TEST_END
int
main(void)
{
return (test(
test_decay_ticks,
test_decay_ticker,
test_decay_nonmonotonic));
}

test/unit/mallctl.c

@@ -164,7 +164,9 @@ TEST_BEGIN(test_mallctl_opt)
 	TEST_MALLCTL_OPT(size_t, lg_chunk, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(size_t, narenas, always);
+	TEST_MALLCTL_OPT(const char *, purge, always);
 	TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
+	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
 	TEST_MALLCTL_OPT(bool, stats_print, always);
 	TEST_MALLCTL_OPT(const char *, junk, fill);
 	TEST_MALLCTL_OPT(size_t, quarantine, fill);

@@ -355,6 +357,8 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);
 
+	test_skip_if(opt_purge != purge_mode_ratio);
+
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");

@@ -382,6 +386,39 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_decay_time)
+{
+	ssize_t decay_time, orig_decay_time, prev_decay_time;
+	size_t sz = sizeof(ssize_t);
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");
+
+	decay_time = -2;
+	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    "Unexpected mallctl() success");
+
+	decay_time = TIME_SEC_MAX;
+	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), 0,
+	    "Unexpected mallctl() failure");
+
+	for (prev_decay_time = decay_time, decay_time = -1;
+	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+		ssize_t old_decay_time;
+
+		assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time,
+		    &sz, &decay_time, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
+		assert_zd_eq(old_decay_time, prev_decay_time,
+		    "Unexpected old arena.0.decay_time");
+	}
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_purge)
 {
 	unsigned narenas;

@@ -402,6 +439,26 @@ TEST_BEGIN(test_arena_i_purge)
 }
 TEST_END
 
+TEST_BEGIN(test_arena_i_decay)
+{
+	unsigned narenas;
+	size_t sz = sizeof(unsigned);
+	size_t mib[3];
+	size_t miblen = 3;
+
+	assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() failure");
+	mib[1] = narenas;
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
 TEST_BEGIN(test_arena_i_dss)
 {
 	const char *dss_prec_old, *dss_prec_new;

@@ -466,6 +523,8 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);
 
+	test_skip_if(opt_purge != purge_mode_ratio);
+
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");

@@ -493,6 +552,39 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
 }
 TEST_END
 
+TEST_BEGIN(test_arenas_decay_time)
+{
+	ssize_t decay_time, orig_decay_time, prev_decay_time;
+	size_t sz = sizeof(ssize_t);
+
+	test_skip_if(opt_purge != purge_mode_decay);
+
+	assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");
+
+	decay_time = -2;
+	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    "Unexpected mallctl() success");
+
+	decay_time = TIME_SEC_MAX;
+	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
+	    &decay_time, sizeof(ssize_t)), 0,
+	    "Unexpected mallctl() failure");
+
+	for (prev_decay_time = decay_time, decay_time = -1;
+	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
+		ssize_t old_decay_time;
+
+		assert_d_eq(mallctl("arenas.decay_time", &old_decay_time,
+		    &sz, &decay_time, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
+		assert_zd_eq(old_decay_time, prev_decay_time,
+		    "Unexpected old arenas.decay_time");
+	}
+}
+TEST_END
+
 TEST_BEGIN(test_arenas_constants)
 {

@@ -621,10 +713,13 @@ main(void)
 	    test_tcache,
 	    test_thread_arena,
 	    test_arena_i_lg_dirty_mult,
+	    test_arena_i_decay_time,
 	    test_arena_i_purge,
+	    test_arena_i_decay,
 	    test_arena_i_dss,
 	    test_arenas_initialized,
 	    test_arenas_lg_dirty_mult,
+	    test_arenas_decay_time,
 	    test_arenas_constants,
 	    test_arenas_bin_constants,
 	    test_arenas_lrun_constants,