Enhance the G/g MALLOC_OPTIONS flags to control GC sweep interval, rather
than just enabling/disabling GC.
Jason Evans 2010-01-03 14:45:26 -08:00
parent 952b7d192b
commit 3f3ecfb8e8
2 changed files with 43 additions and 15 deletions


@@ -245,9 +245,14 @@ The default minimum ratio is 32:1;
 .Ev JEMALLOC_OPTIONS=6D
 will disable dirty page purging.
 @roff_tcache@.It G
-@roff_tcache@Enable/disable incremental garbage collection of unused objects
-@roff_tcache@stored in thread-specific caches.
-@roff_tcache@This option is enabled by default.
+@roff_tcache@Double/halve the approximate interval (counted in terms of
+@roff_tcache@thread-specific cache allocation/deallocation events) between full
+@roff_tcache@thread-specific cache garbage collection sweeps.
+@roff_tcache@Garbage collection is actually performed incrementally, one size
+@roff_tcache@class at a time, in order to avoid large collection pauses.
+@roff_tcache@The default sweep interval is 8192;
+@roff_tcache@.Ev JEMALLOC_OPTIONS=14g
+@roff_tcache@will disable garbage collection.
 @roff_tcache@.It H
 @roff_tcache@When there are multiple threads, use thread-specific caching for
 @roff_tcache@small and medium objects.
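
Editor's note on the flag arithmetic documented above: each g decrements the
sweep-interval exponent (halving the interval) and each G increments it
(doubling it), starting from the default exponent of 13 (2^13 = 8192). The
standalone sketch below is illustrative only, not code from this commit; it
shows why 14g disables GC: fourteen decrements take the exponent from 13 to
-1, the disabled sentinel.

/*
 * Sketch of the G/g exponent arithmetic; illustrative only, not code
 * from this commit.
 */
#include <stdio.h>

int
main(void)
{
	long lg_sweep = 13;	/* Matches LG_TCACHE_GC_SWEEP_DEFAULT. */
	int i;

	/* "14g" in JEMALLOC_OPTIONS applies the g flag 14 times. */
	for (i = 0; i < 14; i++) {
		if (lg_sweep >= 0)
			lg_sweep--;	/* Halve; bottoms out at -1. */
	}

	if (lg_sweep >= 0)
		printf("sweep interval: %lu\n", 1UL << lg_sweep);
	else
		printf("GC disabled\n");	/* 13 - 14 == -1 */
	return (0);
}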


@@ -276,11 +276,12 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
 # define TCACHE_LG_NSLOTS 7
 # define TCACHE_NSLOTS (1U << TCACHE_LG_NSLOTS)
 /*
- * Approximate number of allocation events between full GC sweeps.  Integer
+ * (1U << opt_lg_tcache_gc_sweep) is the approximate number of
+ * allocation events between full GC sweeps (-1: disabled).  Integer
  * rounding may cause the actual number to be slightly higher, since GC is
  * performed incrementally.
  */
-# define TCACHE_GC_THRESHOLD 8192
+# define LG_TCACHE_GC_SWEEP_DEFAULT 13
 #endif
 
 /******************************************************************************/
@@ -1038,7 +1039,7 @@ static __thread tcache_t *tcache_tls
 static pthread_key_t tcache_tsd;
 
 /* Number of tcache allocation/deallocation events between incremental GCs. */
-unsigned tcache_gc_threshold;
+unsigned tcache_gc_incr;
 #endif
 
 /*
@@ -1080,7 +1081,7 @@ static bool opt_junk = false;
 #endif
 #ifdef JEMALLOC_TCACHE
 static bool opt_tcache = true;
-static size_t opt_tcache_gc = true;
+static ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
 #endif
 static ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
 static bool opt_stats_print = false;
@@ -3107,9 +3108,12 @@ static inline void
 tcache_event(tcache_t *tcache)
 {
 
+	if (tcache_gc_incr == 0)
+		return;
+
 	tcache->ev_cnt++;
-	assert(tcache->ev_cnt <= tcache_gc_threshold);
-	if (tcache->ev_cnt >= tcache_gc_threshold) {
+	assert(tcache->ev_cnt <= tcache_gc_incr);
+	if (tcache->ev_cnt >= tcache_gc_incr) {
 		size_t binind = tcache->next_gc_bin;
 		tcache_bin_t *tbin = tcache->tbins[binind];
 
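The hunk above is truncated mid-function, but the visible logic establishes
the cadence: every tcache_gc_incr events, one bin (next_gc_bin) is swept.
The standalone simulation below illustrates that cadence; the toy values and
the counter reset are assumptions, since the diff cuts off before the flush
logic.

/*
 * Simulation of the incremental sweep cadence; illustrative only, not
 * code from this commit.
 */
#include <stdio.h>

int
main(void)
{
	unsigned nbins = 4;		/* Hypothetical bin count. */
	unsigned tcache_gc_incr = 3;	/* Events per incremental GC. */
	unsigned ev_cnt = 0, next_gc_bin = 0, event;

	for (event = 1; event <= 24; event++) {
		if (tcache_gc_incr == 0)
			break;		/* GC disabled, as with "14g". */
		ev_cnt++;
		if (ev_cnt >= tcache_gc_incr) {
			/* One bin per threshold crossing: a full sweep of
			 * all bins takes nbins * tcache_gc_incr = 12
			 * events here. */
			printf("event %2u: sweep bin %u\n", event,
			    next_gc_bin);
			next_gc_bin = (next_gc_bin + 1) % nbins;
			ev_cnt = 0;
		}
	}
	return (0);
}
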
@@ -5609,10 +5613,13 @@ MALLOC_OUT:
				break;
 #ifdef JEMALLOC_TCACHE
			case 'g':
-				opt_tcache_gc = false;
+				if (opt_lg_tcache_gc_sweep >= 0)
+					opt_lg_tcache_gc_sweep--;
				break;
			case 'G':
-				opt_tcache_gc = true;
+				if (opt_lg_tcache_gc_sweep + 1 <
+				    (sizeof(size_t) << 3))
+					opt_lg_tcache_gc_sweep++;
				break;
			case 'h':
				opt_tcache = false;
@@ -5804,8 +5811,12 @@ MALLOC_OUT:
 
 #ifdef JEMALLOC_TCACHE
	/* Compute incremental GC event threshold. */
-	tcache_gc_threshold = (TCACHE_GC_THRESHOLD / nbins) +
-	    ((TCACHE_GC_THRESHOLD % nbins == 0) ? 0 : 1);
+	if (opt_lg_tcache_gc_sweep >= 0) {
+		tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
+		    nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins == 0)
+		    ? 0 : 1);
+	} else
+		tcache_gc_incr = 0;
 #endif
 
	/* Set variables according to the value of opt_lg_chunk. */
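The replacement code is a ceiling division, splitting the 2^lg sweep
interval across the nbins size-class bins and rounding up, which is why the
header comment warns the actual sweep may be slightly longer. A worked
example, using a hypothetical bin count of 28 (not a value taken from this
commit):

/* Worked example of the ceiling division above. */
#include <stdio.h>

int
main(void)
{
	unsigned lg_sweep = 13;			/* LG_TCACHE_GC_SWEEP_DEFAULT */
	unsigned nbins = 28;			/* Hypothetical bin count. */
	unsigned sweep = 1U << lg_sweep;	/* 8192 */
	unsigned incr = (sweep / nbins) + ((sweep % nbins == 0) ? 0 : 1);

	/* Prints 293: 8192 / 28 rounds up from 292.57.  A full sweep then
	 * takes 28 * 293 = 8204 events, slightly more than 8192, matching
	 * the rounding caveat in the header comment. */
	printf("tcache_gc_incr = %u\n", incr);
	return (0);
}
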
@@ -6387,7 +6398,6 @@ malloc_stats_print(const char *opts)
		malloc_message("Boolean JEMALLOC_OPTIONS: ",
		    opt_abort ? "A" : "a", "", "");
 #ifdef JEMALLOC_TCACHE
-		malloc_message(opt_tcache_gc ? "G" : "g", "", "", "");
		malloc_message(opt_tcache ? "H" : "h", "", "", "");
 #endif
 #ifdef JEMALLOC_FILL
@@ -6448,7 +6458,20 @@ malloc_stats_print(const char *opts)
			    "Min active:dirty page ratio per arena: N/A\n",
			    "", "", "");
		}
-
+#ifdef JEMALLOC_TCACHE
+		if (opt_tcache) {
+			malloc_message("Thread cache GC sweep interval: ",
+			    (tcache_gc_incr > 0) ?
+			    umax2s((1U << opt_lg_tcache_gc_sweep), 10, s)
+			    : "N/A",
+			    "", "");
+			malloc_message(" (increment interval: ",
+			    (tcache_gc_incr > 0) ?
+			    umax2s(tcache_gc_incr, 10, s)
+			    : "N/A",
+			    ")\n", "");
+		}
+#endif
		malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "",
		    "");
		malloc_message(" (2^", umax2s(opt_lg_chunk, 10, s), ")\n", "");