Dynamically adjust tcache fill count.

Dynamically adjust tcache fill count (number of objects allocated per
tcache refill) such that if GC has to flush inactive objects, the fill
count gradually decreases.  Conversely, if refills occur while the fill
count is depressed, the fill count gradually increases back to its
maximum value.
Jason Evans 2011-03-21 00:18:17 -07:00
parent 893a0ed7c8
commit 1dcb4f86b2
3 changed files with 27 additions and 9 deletions
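
Before the per-file diffs, a minimal standalone sketch of the feedback
loop this commit introduces (hypothetical driver code, not jemalloc's;
only the names low_water, lg_fill_div, and ncached_max mirror the diff):
each GC pass halves the fill count while cached objects go unused
(low_water > 0), and doubles it back toward ncached_max/2 once the bin
has run empty (low_water == -1).

#include <stdio.h>

typedef struct {
	int low_water;        /* Min # cached since last GC; -1 => bin went empty. */
	unsigned lg_fill_div; /* Fill count = ncached_max >> lg_fill_div. */
	unsigned ncached_max;
} tbin_sketch_t;

/* One GC event: shrink or grow the fill count based on low_water. */
static void
gc_adjust(tbin_sketch_t *tbin)
{
	if (tbin->low_water > 0) {
		/* Objects sat unused: halve the fill, keeping it >= 1. */
		if ((tbin->ncached_max >> (tbin->lg_fill_div + 1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/* Bin ran dry since last GC: double the fill, capped at max/2. */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
}

int
main(void)
{
	tbin_sketch_t tbin = {5, 1, 64};

	/* Repeated GC passes over an idle bin: fill decays from 32 toward 1. */
	for (int i = 0; i < 6; i++) {
		gc_adjust(&tbin);
		printf("fill = %u\n", tbin.ncached_max >> tbin.lg_fill_div);
	}
	/* One refill signal lets the fill count climb back. */
	tbin.low_water = -1;
	gc_adjust(&tbin);
	printf("after refill: fill = %u\n", tbin.ncached_max >> tbin.lg_fill_div);
	return (0);
}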

include/jemalloc/internal/tcache.h

@@ -45,7 +45,8 @@ struct tcache_bin_s {
# ifdef JEMALLOC_STATS
	tcache_bin_stats_t tstats;
# endif
-	unsigned	low_water;	/* Min # cached since last GC. */
+	int		low_water;	/* Min # cached since last GC. */
+	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};
@@ -184,6 +185,7 @@ tcache_event(tcache_t *tcache)
	if (tcache->ev_cnt == tcache_gc_incr) {
		size_t binind = tcache->next_gc_bin;
		tcache_bin_t *tbin = &tcache->tbins[binind];
+		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

		if (tbin->low_water > 0) {
			/*
@@ -207,6 +209,20 @@ tcache_event(tcache_t *tcache)
#endif
				    );
			}
+			/*
+			 * Reduce fill count by 2X.  Limit lg_fill_div such that
+			 * the fill count is always at least 1.
+			 */
+			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
+			    >= 1)
+				tbin->lg_fill_div++;
+		} else if (tbin->low_water < 0) {
+			/*
+			 * Increase fill count by 2X.  Make sure lg_fill_div
+			 * stays greater than 0.
+			 */
+			if (tbin->lg_fill_div > 1)
+				tbin->lg_fill_div--;
		}
		tbin->low_water = tbin->ncached;
@@ -222,10 +238,12 @@ tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

-	if (tbin->ncached == 0)
+	if (tbin->ncached == 0) {
+		tbin->low_water = -1;
		return (NULL);
+	}
	tbin->ncached--;
-	if (tbin->ncached < tbin->low_water)
+	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);

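Why low_water becomes signed: tcache_alloc_easy now records a bin going
empty by setting low_water to -1, which the next tcache_event pass above
reads as "a refill occurred while the fill count was depressed, so grow
it again". The (int) casts in the comparisons keep them correct once the
unsigned ncached meets a possibly negative low_water. A compilable
sketch of the updated fast path (simplified, hypothetical names):

#include <stdio.h>

typedef struct {
	int low_water;    /* Goes to -1 once the bin runs empty. */
	unsigned ncached; /* # of cached objects. */
	void **avail;     /* Stack of available objects. */
} tbin_sketch_t;

static void *
alloc_easy_sketch(tbin_sketch_t *tbin)
{
	if (tbin->ncached == 0) {
		/* Empty bin: flag that a refill is about to happen. */
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	/* Cast needed: ncached is unsigned, low_water may be -1. */
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	return (tbin->avail[tbin->ncached]);
}

int
main(void)
{
	void *slots[2] = {&slots[0], &slots[1]};
	tbin_sketch_t tbin = {2, 2, slots};

	/* Drain the bin; the third call finds it empty. */
	while (alloc_easy_sketch(&tbin) != NULL)
		;
	printf("low_water = %d\n", tbin.low_water); /* Prints -1. */
	return (0);
}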
src/arena.c

@@ -1386,8 +1386,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
#endif
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
-	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1);
-	    i < nfill; i++) {
+	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
@@ -1398,8 +1398,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
		tbin->avail[nfill - 1 - i] = ptr;
	}
#ifdef JEMALLOC_STATS
-	bin->stats.allocated += (i - tbin->ncached) *
-	    arena_bin_info[binind].reg_size;
+	bin->stats.allocated += i * arena_bin_info[binind].reg_size;
	bin->stats.nmalloc += i;
	bin->stats.nrequests += tbin->tstats.nrequests;
	bin->stats.nfills++;

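Two details in the arena_tcache_fill_small hunks above: the refill size
is no longer a fixed ncached_max >> 1 but ncached_max >> lg_fill_div,
and the stats update drops the "- tbin->ncached" term, which appears
safe because a fill only happens after tcache_alloc_easy returned NULL,
i.e. with ncached == 0, making the term zero. A small illustration
(hypothetical ncached_max value) of the refill sizes lg_fill_div can
produce:

#include <stdio.h>

int
main(void)
{
	unsigned ncached_max = 200;

	/* Walk lg_fill_div through the range tcache_event allows. */
	for (unsigned lg_fill_div = 1; (ncached_max >> lg_fill_div) >= 1;
	    lg_fill_div++) {
		printf("lg_fill_div=%u -> nfill=%u\n", lg_fill_div,
		    ncached_max >> lg_fill_div);
	}
	return (0);
}

With ncached_max = 200 this prints fills of 100, 50, 25, 12, 6, 3, and 1;
the guard in tcache_event stops lg_fill_div before the shift would reach 0.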
src/tcache.c

@@ -135,7 +135,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
-	if (tbin->ncached < tbin->low_water)
+	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}
@@ -218,7 +218,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
-	if (tbin->ncached < tbin->low_water)
+	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}
@@ -265,6 +265,7 @@ tcache_create(arena_t *arena)
	tcache->arena = arena;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
+		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
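
Finally, tcache_create seeds lg_fill_div at 1, so a fresh tcache's first
refill fetches ncached_max >> 1 objects, exactly what the previous
fixed-size code in arena_tcache_fill_small did; the GC feedback then
moves it between that maximum and a one-object fill. A tiny invariant
check capturing the intended range (hypothetical helper, not part of
the commit):

#include <assert.h>

/* The bounds tcache_event keeps lg_fill_div within for one bin. */
static void
check_lg_fill_div(unsigned ncached_max, unsigned lg_fill_div)
{
	assert(lg_fill_div >= 1);                  /* Fill never exceeds max/2. */
	assert((ncached_max >> lg_fill_div) >= 1); /* Fill is at least 1. */
}

int
main(void)
{
	check_lg_fill_div(64, 1); /* Initial state: fill = 32. */
	check_lg_fill_div(64, 6); /* Fully decayed: fill = 1. */
	return (0);
}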