Refactor out arena_compute_npurge().

Refactor out arena_compute_npurge() by integrating its logic into
arena_stash_dirty() as an incremental computation.
Jason Evans 2016-02-19 19:51:23 -08:00
parent db927b6727
commit 1a4ad3c0fa
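
For context, a minimal standalone sketch of the idea (hypothetical helper names, not jemalloc source; ndirty, nactive, and lg_dirty_mult stand in for the corresponding arena_t fields, and run_npages[] for the dirty runs/chunks walked by arena_stash_dirty()): the page count that arena_compute_npurge() used to compute up front becomes an incremental stop condition against ndirty_limit inside the stash loop.

/*
 * Simplified sketch only; the real code operates on arena_t and its
 * runs_dirty/chunks_cache lists, and also clamps the threshold to
 * chunk_npages.
 */
#include <stdbool.h>
#include <stddef.h>

/* Old shape: arena_compute_npurge() picked a target page count up front. */
static size_t
old_compute_npurge(size_t ndirty, size_t nactive, unsigned lg_dirty_mult,
    bool all)
{

	if (!all) {
		size_t threshold = nactive >> lg_dirty_mult;
		return (ndirty > threshold ? ndirty - threshold : 0);
	}
	return (ndirty);
}

/* New shape: stash incrementally until ndirty would drop to ndirty_limit. */
static size_t
new_stash_dirty(size_t ndirty, size_t ndirty_limit, const size_t *run_npages,
    size_t nruns)
{
	size_t i, nstashed = 0;

	for (i = 0; i < nruns; i++) {
		nstashed += run_npages[i];
		if (ndirty - nstashed <= ndirty_limit)
			break;
	}
	return (nstashed);
}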

@@ -23,7 +23,7 @@ unsigned nhclasses; /* Number of huge size classes. */
  * definition.
  */
 
-static void arena_purge(arena_t *arena, bool all);
+static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
     bool cleaned, bool decommitted);
 static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
@@ -1205,16 +1205,14 @@ arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
 	return (false);
 }
 
-void
-arena_maybe_purge(arena_t *arena)
+static void
+arena_maybe_purge_ratio(arena_t *arena)
 {
 
 	/* Don't purge if the option is disabled. */
 	if (arena->lg_dirty_mult < 0)
 		return;
-	/* Don't recursively purge. */
-	if (arena->purging)
-		return;
+
 	/*
 	 * Iterate, since preventing recursive purging could otherwise leave too
 	 * many dirty pages.
@@ -1229,10 +1227,21 @@ arena_maybe_purge(arena_t *arena)
 		 */
 		if (arena->ndirty <= threshold)
 			return;
-		arena_purge(arena, false);
+		arena_purge_to_limit(arena, threshold);
 	}
 }
 
+void
+arena_maybe_purge(arena_t *arena)
+{
+
+	/* Don't recursively purge. */
+	if (arena->purging)
+		return;
+
+	arena_maybe_purge_ratio(arena);
+}
+
 static size_t
 arena_dirty_count(arena_t *arena)
 {
@@ -1268,35 +1277,15 @@ arena_dirty_count(arena_t *arena)
 }
 
 static size_t
-arena_compute_npurge(arena_t *arena, bool all)
-{
-	size_t npurge;
-
-	/*
-	 * Compute the minimum number of pages that this thread should try to
-	 * purge.
-	 */
-	if (!all) {
-		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
-		threshold = threshold < chunk_npages ? chunk_npages : threshold;
-		npurge = arena->ndirty - threshold;
-	} else
-		npurge = arena->ndirty;
-
-	return (npurge);
-}
-
-static size_t
-arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
-    size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
 	extent_node_t *chunkselm;
 	size_t nstashed = 0;
 
-	/* Stash at least npurge pages. */
+	/* Stash runs/chunks according to ndirty_limit. */
 	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
 	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
@@ -1308,6 +1297,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
 			bool zero;
 			UNUSED void *chunk;
 
+			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
 			chunkselm_next = qr_next(chunkselm, cc_link);
 			/*
 			 * Allocate. chunkselm remains valid due to the
@@ -1322,7 +1313,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
 			assert(zero == extent_node_zeroed_get(chunkselm));
 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
 			    purge_chunks_sentinel);
-			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+			assert(npages == (extent_node_size_get(chunkselm) >>
+			    LG_PAGE));
 			chunkselm = chunkselm_next;
 		} else {
 			arena_chunk_t *chunk =
@@ -1360,7 +1352,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
 		}
 
 		nstashed += npages;
-		if (!all && nstashed >= npurge)
+		if (arena->ndirty - nstashed <= ndirty_limit)
 			break;
 	}
 
@@ -1501,10 +1493,10 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
 }
 
 static void
-arena_purge(arena_t *arena, bool all)
+arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
 {
 	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
-	size_t npurge, npurgeable, npurged;
+	size_t npurge, npurged;
 	arena_runs_dirty_link_t purge_runs_sentinel;
 	extent_node_t purge_chunks_sentinel;
 
@@ -1518,24 +1510,26 @@ arena_purge(arena_t *arena, bool all)
 		size_t ndirty = arena_dirty_count(arena);
 		assert(ndirty == arena->ndirty);
 	}
-	assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
+	assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty ||
+	    ndirty_limit == 0);
+
+	qr_new(&purge_runs_sentinel, rd_link);
+	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+
+	npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
+	    &purge_runs_sentinel, &purge_chunks_sentinel);
+	if (npurge == 0)
+		goto label_return;
+	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+	    &purge_chunks_sentinel);
+	assert(npurged == npurge);
+	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+	    &purge_chunks_sentinel);
 
 	if (config_stats)
 		arena->stats.npurge++;
 
-	npurge = arena_compute_npurge(arena, all);
-	qr_new(&purge_runs_sentinel, rd_link);
-	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
-	npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
-	    &purge_runs_sentinel, &purge_chunks_sentinel);
-	assert(npurgeable >= npurge);
-	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
-	    &purge_chunks_sentinel);
-	assert(npurged == npurgeable);
-	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
-	    &purge_chunks_sentinel);
-
+label_return:
 	arena->purging = false;
 }
@@ -1544,7 +1538,7 @@ arena_purge_all(arena_t *arena)
 {
 
 	malloc_mutex_lock(&arena->lock);
-	arena_purge(arena, true);
+	arena_purge_to_limit(arena, 0);
 	malloc_mutex_unlock(&arena->lock);
 }
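
Usage note, a hedged restatement of what the hunks above imply: callers now encode the old boolean in ndirty_limit. arena_purge_all() passes 0 ("purge everything"), while ratio-driven purging passes the computed threshold, and arena_stash_dirty() stops as soon as arena->ndirty minus the stashed pages reaches the limit. A tiny standalone check of that stop condition (illustration only, not jemalloc code):

#include <assert.h>
#include <stddef.h>

/* Mirrors the new loop exit: arena->ndirty - nstashed <= ndirty_limit. */
static int
should_stop(size_t ndirty, size_t nstashed, size_t ndirty_limit)
{

	return (ndirty - nstashed <= ndirty_limit);
}

int
main(void)
{

	/* Ratio mode: ndirty = 100, threshold = 32 -> stop after stashing 68. */
	assert(!should_stop(100, 67, 32));
	assert(should_stop(100, 68, 32));

	/* "Purge all": ndirty_limit = 0 -> stop only once everything is stashed. */
	assert(!should_stop(100, 99, 0));
	assert(should_stop(100, 100, 0));

	return (0);
}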