Dynamically adjust tcache fill count.
Dynamically adjust tcache fill count (number of objects allocated per tcache refill) such that if GC has to flush inactive objects, the fill count gradually decreases. Conversely, if refills occur while the fill count is depressed, the fill count gradually increases back to its maximum value.
parent 893a0ed7c8
commit 1dcb4f86b2
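Before the diff, a minimal standalone sketch of the adjustment rule described in the commit message. The field names (ncached_max, lg_fill_div, low_water) mirror the struct fields changed below; the model_bin_t type and the driver in main() are hypothetical, added only to show the rule in isolation.

#include <stdio.h>

/* Simplified model of the per-bin state this commit adjusts. */
typedef struct {
    unsigned ncached_max;  /* Bin capacity. */
    unsigned lg_fill_div;  /* Refill count is ncached_max >> lg_fill_div. */
    int      low_water;    /* Min # cached since last GC; -1 after a refill. */
} model_bin_t;

static void
gc_adjust(model_bin_t *bin)
{
    if (bin->low_water > 0) {
        /* Objects sat unused: halve the fill count, but keep it >= 1. */
        if ((bin->ncached_max >> (bin->lg_fill_div + 1)) >= 1)
            bin->lg_fill_div++;
    } else if (bin->low_water < 0) {
        /* The bin ran dry and was refilled: double the fill count. */
        if (bin->lg_fill_div > 1)
            bin->lg_fill_div--;
    }
}

int
main(void)
{
    model_bin_t bin = {200, 1, 0};

    /* Two GC passes that find unused objects shrink the refill size... */
    bin.low_water = 50; gc_adjust(&bin);
    bin.low_water = 50; gc_adjust(&bin);
    printf("after idle GCs: fill = %u\n", bin.ncached_max >> bin.lg_fill_div);

    /* ...and a refill while depressed grows it back toward the maximum. */
    bin.low_water = -1; gc_adjust(&bin);
    printf("after refill:   fill = %u\n", bin.ncached_max >> bin.lg_fill_div);
    return (0);
}

Because lg_fill_div is initialized to 1 and never decremented below 1, the fill count never exceeds half of ncached_max, and the floor check keeps it at one object or more.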
@@ -45,7 +45,8 @@ struct tcache_bin_s {
 # ifdef JEMALLOC_STATS
     tcache_bin_stats_t tstats;
 # endif
-    unsigned    low_water;      /* Min # cached since last GC. */
+    int         low_water;      /* Min # cached since last GC. */
+    unsigned    lg_fill_div;    /* Fill (ncached_max >> lg_fill_div). */
     unsigned    ncached;        /* # of cached objects. */
     void        **avail;        /* Stack of available objects. */
 };

@@ -184,6 +185,7 @@ tcache_event(tcache_t *tcache)
     if (tcache->ev_cnt == tcache_gc_incr) {
         size_t binind = tcache->next_gc_bin;
         tcache_bin_t *tbin = &tcache->tbins[binind];
+        tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

         if (tbin->low_water > 0) {
             /*
@@ -207,6 +209,20 @@ tcache_event(tcache_t *tcache)
 #endif
                     );
             }
+            /*
+             * Reduce fill count by 2X.  Limit lg_fill_div such that
+             * the fill count is always at least 1.
+             */
+            if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
+                >= 1)
+                tbin->lg_fill_div++;
+        } else if (tbin->low_water < 0) {
+            /*
+             * Increase fill count by 2X.  Make sure lg_fill_div
+             * stays greater than 0.
+             */
+            if (tbin->lg_fill_div > 1)
+                tbin->lg_fill_div--;
         }
         tbin->low_water = tbin->ncached;

@@ -222,10 +238,12 @@ tcache_alloc_easy(tcache_bin_t *tbin)
 {
     void *ret;

-    if (tbin->ncached == 0)
+    if (tbin->ncached == 0) {
+        tbin->low_water = -1;
         return (NULL);
+    }
     tbin->ncached--;
-    if (tbin->ncached < tbin->low_water)
+    if ((int)tbin->ncached < tbin->low_water)
         tbin->low_water = tbin->ncached;
     ret = tbin->avail[tbin->ncached];
     return (ret);

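The signed low_water and the -1 sentinel written by tcache_alloc_easy() are what let the next GC pass tell "objects went unused" (low_water > 0) apart from "the bin ran dry and had to be refilled" (low_water < 0). A small model of that bookkeeping follows; the alloc_model_t type and the driver are illustrative only, not part of the commit.

#include <stdio.h>

typedef struct {
    unsigned ncached;    /* # of cached objects. */
    int      low_water;  /* Min # cached since last GC; -1 after running dry. */
} alloc_model_t;

/* Returns 1 if an object was taken from the cache, 0 if a refill is needed. */
static int
model_alloc_easy(alloc_model_t *tbin)
{
    if (tbin->ncached == 0) {
        /* Record that the bin ran dry; the next GC pass will see -1. */
        tbin->low_water = -1;
        return (0);
    }
    tbin->ncached--;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    return (1);
}

int
main(void)
{
    alloc_model_t tbin = {3, 3};

    while (model_alloc_easy(&tbin))
        ;
    /* Prints -1: the bin was emptied, so GC will grow the fill count. */
    printf("low_water = %d\n", tbin.low_water);
    return (0);
}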
@@ -1386,8 +1386,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
 #endif
     bin = &arena->bins[binind];
     malloc_mutex_lock(&bin->lock);
-    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1);
-        i < nfill; i++) {
+    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+        tbin->lg_fill_div); i < nfill; i++) {
         if ((run = bin->runcur) != NULL && run->nfree > 0)
             ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
         else

@@ -1398,8 +1398,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
         tbin->avail[nfill - 1 - i] = ptr;
     }
 #ifdef JEMALLOC_STATS
-    bin->stats.allocated += (i - tbin->ncached) *
-        arena_bin_info[binind].reg_size;
+    bin->stats.allocated += i * arena_bin_info[binind].reg_size;
     bin->stats.nmalloc += i;
     bin->stats.nrequests += tbin->tstats.nrequests;
     bin->stats.nfills++;

@@ -135,7 +135,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
         memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
             rem * sizeof(void *));
     tbin->ncached = rem;
-    if (tbin->ncached < tbin->low_water)
+    if ((int)tbin->ncached < tbin->low_water)
         tbin->low_water = tbin->ncached;
 }

@@ -218,7 +218,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
         memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
             rem * sizeof(void *));
     tbin->ncached = rem;
-    if (tbin->ncached < tbin->low_water)
+    if ((int)tbin->ncached < tbin->low_water)
         tbin->low_water = tbin->ncached;
 }

@@ -265,6 +265,7 @@ tcache_create(arena_t *arena)
     tcache->arena = arena;
     assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
     for (i = 0; i < nhbins; i++) {
+        tcache->tbins[i].lg_fill_div = 1;
         tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
             (uintptr_t)stack_offset);
         stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);