Remove the lg_tcache_gc_sweep option.

Remove the lg_tcache_gc_sweep option, because it is no longer
useful.  Prior to the addition of dynamic adjustment of the tcache fill
count, it was possible for fill/flush overhead to be a problem, but this
problem no longer occurs.
This commit is contained in:
Jason Evans 2012-03-05 14:34:37 -08:00
parent b8c8be7f8a
commit 4507f34628
6 changed files with 12 additions and 58 deletions

View File

@ -853,29 +853,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
allocations to be satisfied without performing any thread allocations to be satisfied without performing any thread
synchronization, at the cost of increased memory use. See the synchronization, at the cost of increased memory use. See the
<link <link
linkend="opt.lg_tcache_gc_sweep"><mallctl>opt.lg_tcache_gc_sweep</mallctl></link>
and <link
linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link> linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
options for related tuning information. This option is enabled by option for related tuning information. This option is enabled by
default.</para></listitem> default.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.lg_tcache_gc_sweep">
<term>
<mallctl>opt.lg_tcache_gc_sweep</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
[<option>--enable-tcache</option>]
</term>
<listitem><para>Approximate interval (log base 2) between full
thread-specific cache garbage collection sweeps, counted in terms of
thread-specific cache allocation/deallocation events. Garbage
collection is actually performed incrementally, one size class at a
time, in order to avoid large collection pauses. The default sweep
interval is 8192 (2^13); setting this option to -1 will disable garbage
collection.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_tcache_max"> <varlistentry id="opt.lg_tcache_max">
<term> <term>
<mallctl>opt.lg_tcache_max</mallctl> <mallctl>opt.lg_tcache_max</mallctl>

View File

@ -21,12 +21,15 @@ typedef struct tcache_s tcache_t;
#define LG_TCACHE_MAXCLASS_DEFAULT 15 #define LG_TCACHE_MAXCLASS_DEFAULT 15
/* /*
* (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation * TCACHE_GC_SWEEP is the approximate number of allocation events between
* events between full GC sweeps (-1: disabled). Integer rounding may cause * full GC sweeps. Integer rounding may cause the actual number to be
* the actual number to be slightly higher, since GC is performed * slightly higher, since GC is performed incrementally.
* incrementally.
*/ */
#define LG_TCACHE_GC_SWEEP_DEFAULT 13 #define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
#endif /* JEMALLOC_H_TYPES */ #endif /* JEMALLOC_H_TYPES */
/******************************************************************************/ /******************************************************************************/
@ -69,7 +72,6 @@ struct tcache_s {
extern bool opt_tcache; extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max; extern ssize_t opt_lg_tcache_max;
extern ssize_t opt_lg_tcache_gc_sweep;
extern tcache_bin_info_t *tcache_bin_info; extern tcache_bin_info_t *tcache_bin_info;
@ -99,9 +101,6 @@ extern size_t nhbins;
/* Maximum cached size class. */ /* Maximum cached size class. */
extern size_t tcache_maxclass; extern size_t tcache_maxclass;
/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache); tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
@ -166,12 +165,12 @@ JEMALLOC_INLINE void
tcache_event(tcache_t *tcache) tcache_event(tcache_t *tcache)
{ {
if (tcache_gc_incr == 0) if (TCACHE_GC_INCR == 0)
return; return;
tcache->ev_cnt++; tcache->ev_cnt++;
assert(tcache->ev_cnt <= tcache_gc_incr); assert(tcache->ev_cnt <= TCACHE_GC_INCR);
if (tcache->ev_cnt == tcache_gc_incr) { if (tcache->ev_cnt == TCACHE_GC_INCR) {
size_t binind = tcache->next_gc_bin; size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind]; tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

View File

@ -65,7 +65,6 @@ CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero) CTL_PROTO(opt_zero)
CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache) CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_gc_sweep)
CTL_PROTO(opt_prof) CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active) CTL_PROTO(opt_prof_active)
@ -187,7 +186,6 @@ static const ctl_node_t opt_node[] = {
{NAME("zero"), CTL(opt_zero)}, {NAME("zero"), CTL(opt_zero)},
{NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)}, {NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)},
{NAME("prof"), CTL(opt_prof)}, {NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)}, {NAME("prof_active"), CTL(opt_prof_active)},
@ -1069,8 +1067,6 @@ CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep,
ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */ CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */

View File

@ -580,8 +580,6 @@ malloc_conf_init(void)
} }
if (config_tcache) { if (config_tcache) {
CONF_HANDLE_BOOL(tcache) CONF_HANDLE_BOOL(tcache)
CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(lg_tcache_max, -1, CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
(sizeof(size_t) << 3) - 1) (sizeof(size_t) << 3) - 1)
} }

View File

@ -490,7 +490,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(zero) OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(xmalloc) OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache) OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
OPT_WRITE_SSIZE_T(lg_tcache_max) OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof) OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix) OPT_WRITE_CHAR_P(prof_prefix)
@ -541,16 +540,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
write_cb(cbopaque, u2s(sv, 10, s)); write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n"); write_cb(cbopaque, "\n");
} }
if ((err = je_mallctl("opt.lg_tcache_gc_sweep", &ssv, &ssz,
NULL, 0)) == 0) {
size_t tcache_gc_sweep = (1U << ssv);
bool tcache_enabled;
CTL_GET("opt.tcache", &tcache_enabled, bool);
write_cb(cbopaque, "Thread cache GC sweep interval: ");
write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
u2s(tcache_gc_sweep, 10, s) : "N/A");
write_cb(cbopaque, "\n");
}
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) { bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t); CTL_GET("opt.lg_prof_sample", &sv, size_t);

View File

@ -6,7 +6,6 @@
bool opt_tcache = true; bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
tcache_bin_info_t *tcache_bin_info; tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */ static unsigned stack_nelms; /* Total stack elms per tcache. */
@ -24,7 +23,6 @@ pthread_key_t tcache_tsd;
size_t nhbins; size_t nhbins;
size_t tcache_maxclass; size_t tcache_maxclass;
unsigned tcache_gc_incr;
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
@ -419,14 +417,6 @@ tcache_boot(void)
stack_nelms += tcache_bin_info[i].ncached_max; stack_nelms += tcache_bin_info[i].ncached_max;
} }
/* Compute incremental GC event threshold. */
if (opt_lg_tcache_gc_sweep >= 0) {
tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
NBINS) + (((1U << opt_lg_tcache_gc_sweep) % NBINS ==
0) ? 0 : 1);
} else
tcache_gc_incr = 0;
if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) != if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
0) { 0) {
malloc_write( malloc_write(