Convert arena_maybe_purge() recursion to iteration.

This resolves #235.
Jason Evans 2015-06-22 18:50:32 -07:00
parent dc0610a714
commit 0a9f9a4d51
2 changed files with 28 additions and 11 deletions
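
The change below guards arena_purge() with a per-arena purging flag and converts arena_maybe_purge() into a loop. As a reading aid, here is a minimal self-contained sketch of that guard-plus-loop pattern; the types and names (struct arena, maybe_purge, purge) are hypothetical stand-ins for jemalloc's arena_t, arena_maybe_purge(), and arena_purge(), and the purge body is a placeholder:

    #include <stdbool.h>
    #include <stddef.h>

    struct arena {
        int    lg_dirty_mult;  /* Threshold shift; negative disables purging. */
        bool   purging;        /* True while purge() is executing. */
        size_t nactive;        /* Pages in active use. */
        size_t ndirty;         /* Unused dirty pages. */
    };

    static void purge(struct arena *a);

    static void
    maybe_purge(struct arena *a)
    {

        /* Don't purge if the option is disabled. */
        if (a->lg_dirty_mult < 0)
            return;
        /* Don't recursively purge; the outermost call's loop re-checks. */
        if (a->purging)
            return;
        while (true) {
            size_t threshold = a->nactive >> a->lg_dirty_mult;

            if (a->ndirty <= threshold)
                return;
            purge(a); /* May reenter maybe_purge(); see the guard above. */
        }
    }

    static void
    purge(struct arena *a)
    {

        a->purging = true;
        /*
         * Real page purging happens here; that work may free pages and
         * reenter maybe_purge(). This stand-in just shrinks ndirty.
         */
        a->ndirty /= 2;
        a->purging = false;
    }

The loop matters because a reentrant call now returns immediately without purging; re-checking the threshold in the outermost call ensures that pages dirtied during a purge still get purged before maybe_purge() returns.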

include/jemalloc/internal/arena.h

@@ -316,6 +316,9 @@ struct arena_s {
 	/* Minimum ratio (log base 2) of nactive:ndirty. */
 	ssize_t			lg_dirty_mult;
 
+	/* True if a thread is currently executing arena_purge(). */
+	bool			purging;
+
 	/* Number of pages in active runs and huge regions. */
 	size_t			nactive;

src/arena.c

@@ -1143,22 +1143,30 @@ arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
 void
 arena_maybe_purge(arena_t *arena)
 {
-	size_t threshold;
 
 	/* Don't purge if the option is disabled. */
 	if (arena->lg_dirty_mult < 0)
 		return;
-	threshold = (arena->nactive >> arena->lg_dirty_mult);
-	threshold = threshold < chunk_npages ? chunk_npages : threshold;
+	/* Don't recursively purge. */
+	if (arena->purging)
+		return;
 	/*
-	 * Don't purge unless the number of purgeable pages exceeds the
-	 * threshold.
+	 * Iterate, since preventing recursive purging could otherwise leave too
+	 * many dirty pages.
 	 */
-	if (arena->ndirty <= threshold)
-		return;
-
-	arena_purge(arena, false);
+	while (true) {
+		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+		if (threshold < chunk_npages)
+			threshold = chunk_npages;
+		/*
+		 * Don't purge unless the number of purgeable pages exceeds the
+		 * threshold.
+		 */
+		if (arena->ndirty <= threshold)
+			return;
+		arena_purge(arena, false);
+	}
 }
 
 static size_t
 arena_dirty_count(arena_t *arena)
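
A worked example of the threshold computation above, using illustrative numbers rather than jemalloc defaults: with lg_dirty_mult = 3 and nactive = 100000 pages, the threshold is 100000 >> 3 = 12500 dirty pages, so the loop purges until ndirty drops to 12500 or below. With nactive = 1000, 1000 >> 3 = 125, which the chunk_npages floor (512, assuming 4 KiB pages and 2 MiB chunks) raises to 512, so small arenas are not purged over trivially few dirty pages.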
@@ -1411,6 +1419,8 @@ arena_purge(arena_t *arena, bool all)
 	arena_runs_dirty_link_t purge_runs_sentinel;
 	extent_node_t purge_chunks_sentinel;
 
+	arena->purging = true;
+
 	/*
 	 * Calls to arena_dirty_count() are disabled even for debug builds
 	 * because overhead grows nonlinearly as memory usage increases.
@@ -1436,6 +1446,8 @@ arena_purge(arena_t *arena, bool all)
 	assert(npurged == npurgeable);
 	arena_unstash_purged(arena, &purge_runs_sentinel,
 	    &purge_chunks_sentinel);
+
+	arena->purging = false;
 }
 
 void
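
Note that purging is a plain bool rather than an atomic: these two hunks set and clear it inside arena_purge(), and arena_maybe_purge() reads it without explicit synchronization. The locking is not visible in this diff; presumably both functions run with the arena's mutex held, which would make the unsynchronized flag safe.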
@@ -2053,7 +2065,8 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 		 * for 4 KiB pages and 64-byte cachelines.
 		 */
 		prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
-		    UINT64_C(6364136223846793009), UINT64_C(1442695040888963409));
+		    UINT64_C(6364136223846793009),
+		    UINT64_C(1442695040888963409));
 		random_offset = ((uintptr_t)r) << LG_CACHELINE;
 	} else
 		random_offset = 0;
@@ -2873,6 +2886,7 @@ arena_new(unsigned ind)
 	arena->spare = NULL;
 
 	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+	arena->purging = false;
 	arena->nactive = 0;
 	arena->ndirty = 0;