Remove the lg_tcache_gc_sweep option.
Remove the lg_tcache_gc_sweep option; it is no longer useful. Prior to the addition of dynamic adjustment of the tcache fill count, fill/flush overhead could be a problem, but that problem no longer occurs.
commit 4507f34628 (parent b8c8be7f8a)
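
For orientation before the diff: jemalloc's thread-cache GC is driven by per-thread allocation/deallocation events, and each full sweep is spread across the size-class bins one bin at a time. The following standalone sketch models that cadence; it is illustrative, not jemalloc's actual code, and NBINS = 28 plus the toy_* names are assumptions made for the example.

#include <assert.h>
#include <stdio.h>

#define NBINS           28    /* assumed; the real value is build-dependent */
#define TCACHE_GC_SWEEP 8192  /* events per sweep (was 1 << opt_lg_tcache_gc_sweep) */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

struct toy_tcache {
	unsigned ev_cnt;      /* events since the last incremental GC pass */
	unsigned next_gc_bin; /* round-robin index of the next bin to sweep */
};

/* Called on every cache allocation/deallocation event: one size-class bin is
 * (partially) flushed every TCACHE_GC_INCR events, so all NBINS bins are
 * visited roughly once per TCACHE_GC_SWEEP events, avoiding one large pause. */
void
toy_tcache_event(struct toy_tcache *tcache)
{
	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR) {
		/* ... flush unused objects from bin next_gc_bin here ... */
		tcache->next_gc_bin = (tcache->next_gc_bin + 1) % NBINS;
		tcache->ev_cnt = 0;
	}
}

int
main(void)
{
	struct toy_tcache t = {0, 0};
	unsigned i;

	for (i = 0; i < TCACHE_GC_SWEEP; i++)
		toy_tcache_event(&t);
	printf("bins swept after %d events: %u\n", TCACHE_GC_SWEEP,
	    t.next_gc_bin);
	return (0);
}

Running it shows 27 of the 28 assumed bins visited after exactly 8192 events, matching the "approximate" wording in the header comment below.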
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -853,29 +853,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         allocations to be satisfied without performing any thread
         synchronization, at the cost of increased memory use. See the
         <link
-        linkend="opt.lg_tcache_gc_sweep"><mallctl>opt.lg_tcache_gc_sweep</mallctl></link>
-        and <link
         linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
-        options for related tuning information. This option is enabled by
+        option for related tuning information. This option is enabled by
         default.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="opt.lg_tcache_gc_sweep">
-        <term>
-          <mallctl>opt.lg_tcache_gc_sweep</mallctl>
-          (<type>ssize_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Approximate interval (log base 2) between full
-        thread-specific cache garbage collection sweeps, counted in terms of
-        thread-specific cache allocation/deallocation events. Garbage
-        collection is actually performed incrementally, one size class at a
-        time, in order to avoid large collection pauses. The default sweep
-        interval is 8192 (2^13); setting this option to -1 will disable garbage
-        collection.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="opt.lg_tcache_max">
         <term>
           <mallctl>opt.lg_tcache_max</mallctl>
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -21,12 +21,15 @@ typedef struct tcache_s tcache_t;
 #define LG_TCACHE_MAXCLASS_DEFAULT 15
 
 /*
- * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
- * events between full GC sweeps (-1: disabled). Integer rounding may cause
- * the actual number to be slightly higher, since GC is performed
- * incrementally.
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps. Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
  */
-#define LG_TCACHE_GC_SWEEP_DEFAULT 13
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
@@ -69,7 +72,6 @@ struct tcache_s {
 
 extern bool opt_tcache;
 extern ssize_t opt_lg_tcache_max;
-extern ssize_t opt_lg_tcache_gc_sweep;
 
 extern tcache_bin_info_t *tcache_bin_info;
 
@@ -99,9 +101,6 @@ extern size_t nhbins;
 /* Maximum cached size class. */
 extern size_t tcache_maxclass;
 
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-extern unsigned tcache_gc_incr;
-
 void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
     tcache_t *tcache);
 void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
@@ -166,12 +165,12 @@ JEMALLOC_INLINE void
 tcache_event(tcache_t *tcache)
 {
 
-	if (tcache_gc_incr == 0)
+	if (TCACHE_GC_INCR == 0)
 		return;
 
 	tcache->ev_cnt++;
-	assert(tcache->ev_cnt <= tcache_gc_incr);
-	if (tcache->ev_cnt == tcache_gc_incr) {
+	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+	if (tcache->ev_cnt == TCACHE_GC_INCR) {
 		size_t binind = tcache->next_gc_bin;
 		tcache_bin_t *tbin = &tcache->tbins[binind];
 		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
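
Worked numbers for the new macro, under the same assumed NBINS of 28 as in the sketch near the top: TCACHE_GC_INCR = 8192/28 + 1 = 292 + 1 = 293 events per incremental pass, so a full sweep spans 293 * 28 = 8204 events, slightly more than TCACHE_GC_SWEEP, as the comment promises. One subtlety: the macro rounds up whenever the quotient is nonzero, whereas the runtime computation removed from tcache_boot() (at the bottom of this diff) rounded up only when the division left a remainder; for an NBINS that divides 8192 evenly the two differ by one event, which is harmless for a heuristic.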
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -65,7 +65,6 @@ CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
 CTL_PROTO(opt_xmalloc)
 CTL_PROTO(opt_tcache)
-CTL_PROTO(opt_lg_tcache_gc_sweep)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
@@ -187,7 +186,6 @@ static const ctl_node_t opt_node[] = {
 	{NAME("zero"), CTL(opt_zero)},
 	{NAME("xmalloc"), CTL(opt_xmalloc)},
 	{NAME("tcache"), CTL(opt_tcache)},
-	{NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)},
 	{NAME("prof"), CTL(opt_prof)},
 	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
 	{NAME("prof_active"), CTL(opt_prof_active)},
@@ -1069,8 +1067,6 @@ CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
 CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep,
-    ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
 CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
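
The user-visible effect of these ctl.c deletions, sketched from the caller's side (this assumes a build whose public symbols are unprefixed, i.e. mallctl() rather than je_mallctl(); the error code is an assumption based on mallctl's documented behavior for unknown names):

#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	ssize_t ssv;
	size_t ssz = sizeof(ssv);
	int err;

	/* Before this commit: returns 0 and stores the option value.
	 * After: the "opt.lg_tcache_gc_sweep" node is gone, so the lookup
	 * fails (ENOENT is what mallctl documents for unknown names). */
	err = mallctl("opt.lg_tcache_gc_sweep", &ssv, &ssz, NULL, 0);
	if (err != 0)
		printf("opt.lg_tcache_gc_sweep: %s\n", strerror(err));
	return (0);
}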
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -580,8 +580,6 @@ malloc_conf_init(void)
 		}
 		if (config_tcache) {
 			CONF_HANDLE_BOOL(tcache)
-			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
-			    (sizeof(size_t) << 3) - 1)
 			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
 			    (sizeof(size_t) << 3) - 1)
 		}
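
Likewise, after this malloc_conf_init() change a configuration string such as MALLOC_CONF="tcache:true,lg_tcache_gc_sweep:13" no longer matches any CONF_HANDLE_* case for the second pair; presumably jemalloc then reports it as an invalid conf pair rather than honoring it, since the sweep interval is no longer tunable.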
--- a/src/stats.c
+++ b/src/stats.c
@@ -490,7 +490,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	OPT_WRITE_BOOL(zero)
 	OPT_WRITE_BOOL(xmalloc)
 	OPT_WRITE_BOOL(tcache)
-	OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
 	OPT_WRITE_SSIZE_T(lg_tcache_max)
 	OPT_WRITE_BOOL(prof)
 	OPT_WRITE_CHAR_P(prof_prefix)
@@ -541,16 +540,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		write_cb(cbopaque, u2s(sv, 10, s));
 		write_cb(cbopaque, "\n");
 	}
-	if ((err = je_mallctl("opt.lg_tcache_gc_sweep", &ssv, &ssz,
-	    NULL, 0)) == 0) {
-		size_t tcache_gc_sweep = (1U << ssv);
-		bool tcache_enabled;
-		CTL_GET("opt.tcache", &tcache_enabled, bool);
-		write_cb(cbopaque, "Thread cache GC sweep interval: ");
-		write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
-		    u2s(tcache_gc_sweep, 10, s) : "N/A");
-		write_cb(cbopaque, "\n");
-	}
 	if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
 	    bv) {
 		CTL_GET("opt.lg_prof_sample", &sv, size_t);
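
The second stats.c hunk means the stats report simply drops the "Thread cache GC sweep interval: ..." line from its options section; no replacement line is emitted for the now-fixed interval.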
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -6,7 +6,6 @@
 
 bool opt_tcache = true;
 ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
-ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
 
 tcache_bin_info_t *tcache_bin_info;
 static unsigned stack_nelms; /* Total stack elms per tcache. */
@@ -24,7 +23,6 @@ pthread_key_t tcache_tsd;
 
 size_t nhbins;
 size_t tcache_maxclass;
-unsigned tcache_gc_incr;
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
@@ -419,14 +417,6 @@ tcache_boot(void)
 		stack_nelms += tcache_bin_info[i].ncached_max;
 	}
 
-	/* Compute incremental GC event threshold. */
-	if (opt_lg_tcache_gc_sweep >= 0) {
-		tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
-		    NBINS) + (((1U << opt_lg_tcache_gc_sweep) % NBINS ==
-		    0) ? 0 : 1);
-	} else
-		tcache_gc_incr = 0;
-
 	if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
 	    0) {
 		malloc_write(
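
Design note on the tcache.c hunks: the incremental GC threshold used to be computed once in tcache_boot() and stored in the tcache_gc_incr global; with TCACHE_GC_INCR a preprocessor constant, the value participates in constant folding, so the TCACHE_GC_INCR == 0 early return in tcache_event() can be eliminated entirely at compile time.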