Revert re-addition of purge_lock.

Linux kernels have been capable of concurrent page table access since
2.6.27, so this hack is not necessary for modern kernels.
Jason Evans 2010-04-08 20:31:58 -07:00
parent 68f91893bd
commit 799ca0b68d
2 changed files with 48 additions and 39 deletions

Changed file 1 of 2: arena header (struct arena_s)

@@ -318,9 +318,12 @@ struct arena_s {
 	size_t		ndirty;
 
 	/*
-	 * True if pages are currently being purged by a thread.
+	 * Approximate number of pages being purged.  It is possible for
+	 * multiple threads to purge dirty pages concurrently, and they use
+	 * npurgatory to indicate the total number of pages all threads are
+	 * attempting to purge.
 	 */
-	bool		purgatory;
+	size_t		npurgatory;
 
 	/*
 	 * Size/address-ordered trees of this arena's available runs.  The trees
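
The new comment describes a claim-based scheme: each purging thread adds the pages it intends to purge to npurgatory while holding the arena lock, so later threads only see the dirty pages that remain unclaimed. The toy program below (made-up page counts and a plain main() instead of jemalloc's arena lock and threads) is only meant to illustrate that arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy illustration of the npurgatory claim arithmetic; not jemalloc code. */
int
main(void)
{
	size_t ndirty = 300;	/* dirty pages in the arena (made-up number) */
	size_t npurgatory = 0;	/* pages already claimed for purging */
	size_t target = 100;	/* level ndirty should be driven back down to */

	/* "Thread A" claims enough unclaimed pages to reach the target. */
	size_t claim_a = (ndirty - npurgatory) - target;	/* 200 */
	npurgatory += claim_a;

	/* "Thread B" runs the same logic later and finds nothing left to claim. */
	size_t unclaimed = ndirty - npurgatory;			/* 100 == target */
	size_t claim_b = (unclaimed > target) ? unclaimed - target : 0;
	assert(claim_b == 0);

	printf("thread A claims %zu pages, thread B claims %zu\n", claim_a, claim_b);
	return (0);
}

Without the claim, a second thread would also compute 200 pages to purge, and the arena would purge well past its target.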

Changed file 2 of 2: arena implementation (arena_maybe_purge, arena_purge, arena_new, arena_boot)

@@ -23,9 +23,6 @@ size_t sspace_max;
 size_t		lg_mspace;
 size_t		mspace_mask;
 
-/* Used to prevent threads from concurrently calling madvise(2). */
-static malloc_mutex_t	purge_lock;
-
 /*
  * const_small_size2bin is a static constant lookup table that in the common
  * case can be used as-is for small_size2bin.  For dynamically linked programs,
@@ -587,9 +584,10 @@ arena_maybe_purge(arena_t *arena)
 {
 
 	/* Enforce opt_lg_dirty_mult. */
-	if (opt_lg_dirty_mult >= 0 && arena->purgatory == false &&
-	    arena->ndirty > chunk_npages && (arena->nactive >>
-	    opt_lg_dirty_mult) < arena->ndirty)
+	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
+	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
+	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+	    arena->npurgatory))
 		arena_purge(arena);
 }
 
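
Read on its own, the rewritten trigger only counts dirty pages that no thread has already claimed. A standalone restatement of the condition may help; the function name and flat parameter list are illustrative, not jemalloc's API (the real check reads these fields from arena_t with the arena lock held):

#include <stdbool.h>
#include <stddef.h>

/*
 * Purge only when the unclaimed dirty pages (ndirty - npurgatory) exceed both
 * the chunk_npages floor and the active/dirty ratio set by lg_dirty_mult.
 */
bool
should_purge(size_t nactive, size_t ndirty, size_t npurgatory,
    int lg_dirty_mult, size_t chunk_npages)
{
	if (lg_dirty_mult < 0 || ndirty <= npurgatory)
		return (false);
	size_t unclaimed = ndirty - npurgatory;
	return (unclaimed > chunk_npages &&
	    (nactive >> lg_dirty_mult) < unclaimed);
}

For example, with 16384 active pages, lg_dirty_mult = 5, and chunk_npages = 256 (illustrative numbers), purging starts once more than 512 unclaimed dirty pages accumulate, because 16384 >> 5 = 512 dominates the chunk_npages floor.
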
@@ -759,32 +757,23 @@ arena_purge(arena_t *arena)
 	}
 	assert(ndirty == arena->ndirty);
 #endif
+	assert(arena->ndirty > arena->npurgatory);
 	assert(arena->ndirty > chunk_npages);
 	assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty);
 
-	/*
-	 * Only allow one thread at a time to purge dirty pages.  madvise(2)
-	 * causes the kernel to modify virtual memory data structures that are
-	 * typically protected by a lock, and purging isn't important enough to
-	 * suffer lock contention in the kernel.  The result of failing to
-	 * acquire purge_lock here is that this arena will operate with ndirty
-	 * above the threshold until some dirty pages are re-used, or the
-	 * creation of more dirty pages causes this function to be called
-	 * again.
-	 */
-	if (malloc_mutex_trylock(&purge_lock))
-		return;
 #ifdef JEMALLOC_STATS
 	arena->stats.npurge++;
 #endif
 
 	/*
 	 * Compute the minimum number of pages that this thread should try to
-	 * purge.
+	 * purge, and add the result to arena->npurgatory.  This will keep
+	 * multiple threads from racing to reduce ndirty below the threshold.
 	 */
-	npurgatory = arena->ndirty - (arena->nactive >> opt_lg_dirty_mult);
-	arena->purgatory = true;
+	npurgatory = (arena->ndirty - arena->npurgatory) - (arena->nactive >>
+	    opt_lg_dirty_mult);
+	arena->npurgatory += npurgatory;
 
 	while (npurgatory > 0) {
 		/* Get next chunk with dirty pages. */
 		chunk = ql_first(&arena->chunks_dirty);
@@ -792,9 +781,11 @@ arena_purge(arena_t *arena)
 			/*
 			 * This thread was unable to purge as many pages as
 			 * originally intended, due to races with other threads
-			 * that re-used dirty pages.
+			 * that either did some of the purging work, or re-used
+			 * dirty pages.
 			 */
-			goto RETURN;
+			arena->npurgatory -= npurgatory;
+			return;
 		}
 		while (chunk->ndirty == 0) {
 			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
@@ -802,20 +793,38 @@ arena_purge(arena_t *arena)
 			chunk = ql_first(&arena->chunks_dirty);
 			if (chunk == NULL) {
 				/* Same logic as for above. */
-				goto RETURN;
+				arena->npurgatory -= npurgatory;
+				return;
 			}
 		}
 
-		if (chunk->ndirty >= npurgatory) {
-			/* This thread will purge all the pages in chunk. */
-			npurgatory = 0;
-		} else
-			npurgatory -= chunk->ndirty;
+		if (chunk->ndirty > npurgatory) {
+			/*
+			 * This thread will, at a minimum, purge all the dirty
+			 * pages in chunk, so set npurgatory to reflect this
+			 * thread's commitment to purge the pages.  This tends
+			 * to reduce the chances of the following scenario:
+			 *
+			 * 1) This thread sets arena->npurgatory such that
+			 *    (arena->ndirty - arena->npurgatory) is at the
+			 *    threshold.
+			 * 2) This thread drops arena->lock.
+			 * 3) Another thread causes one or more pages to be
+			 *    dirtied, and immediately determines that it must
+			 *    purge dirty pages.
+			 *
+			 * If this scenario *does* play out, that's okay,
+			 * because all of the purging work being done really
+			 * needs to happen.
+			 */
+			arena->npurgatory += chunk->ndirty - npurgatory;
+			npurgatory = chunk->ndirty;
+		}
+
+		arena->npurgatory -= chunk->ndirty;
+		npurgatory -= chunk->ndirty;
 		arena_chunk_purge(arena, chunk);
 	}
-RETURN:
-	arena->purgatory = false;
-	malloc_mutex_unlock(&purge_lock);
 }
 
 static void
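
Stripped of the chunk bookkeeping, the loop above follows a claim-and-settle pattern: claim just enough unclaimed pages to get back to the threshold, grow the claim when an entire chunk gets purged anyway, and hand back whatever could not be purged. The sketch below models only that arithmetic; the types and the next_dirty_chunk()/purge_chunk() helpers are invented for the example, and the real code holds the arena lock around every counter update and does the actual madvise(2) work in arena_chunk_purge():

#include <stddef.h>

/* Minimal stand-ins so the sketch compiles; illustrative only, not jemalloc's types. */
typedef struct chunk_s {
	size_t ndirty;
	struct chunk_s *next;
} chunk_t;

typedef struct {
	size_t nactive;		/* pages in active runs */
	size_t ndirty;		/* dirty, purgeable pages */
	size_t npurgatory;	/* pages all threads combined intend to purge */
	unsigned lg_dirty_mult;	/* active:dirty ratio (assumed enabled here) */
	chunk_t *chunks_dirty;	/* list of chunks that contain dirty pages */
} arena_sketch_t;

static chunk_t *
next_dirty_chunk(arena_sketch_t *a)
{
	return (a->chunks_dirty);
}

static void
purge_chunk(arena_sketch_t *a, chunk_t *chunk)
{
	/* Pretend every dirty page in the chunk was purged. */
	a->ndirty -= chunk->ndirty;
	chunk->ndirty = 0;
	a->chunks_dirty = chunk->next;
}

/* Assumes the trigger condition held: unclaimed dirty pages exceed the threshold. */
void
purge_sketch(arena_sketch_t *a)
{
	/* Claim only the unclaimed dirty pages above the threshold. */
	size_t todo = (a->ndirty - a->npurgatory) -
	    (a->nactive >> a->lg_dirty_mult);
	a->npurgatory += todo;

	while (todo > 0) {
		chunk_t *chunk = next_dirty_chunk(a);
		if (chunk == NULL) {
			/* Other threads purged or re-used the rest; release the claim. */
			a->npurgatory -= todo;
			return;
		}
		if (chunk->ndirty > todo) {
			/* Whole chunks get purged, so grow the claim to cover this one. */
			a->npurgatory += chunk->ndirty - todo;
			todo = chunk->ndirty;
		}
		/* Settle the claim for this chunk, then purge it. */
		a->npurgatory -= chunk->ndirty;
		todo -= chunk->ndirty;
		purge_chunk(a, chunk);
	}
}

Because every thread's intent is visible in npurgatory, concurrent callers naturally split the work instead of serializing on a purge_lock, which is what this commit relies on.
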
@@ -2137,7 +2146,7 @@ arena_new(arena_t *arena, unsigned ind)
 
 	arena->nactive = 0;
 	arena->ndirty = 0;
-	arena->purgatory = false;
+	arena->npurgatory = 0;
 
 	arena_avail_tree_new(&arena->runs_avail_clean);
 	arena_avail_tree_new(&arena->runs_avail_dirty);
@@ -2433,8 +2442,5 @@ arena_boot(void)
 	    ((header_size & PAGE_MASK) != 0);
 	arena_maxclass = chunksize - (arena_chunk_header_npages << PAGE_SHIFT);
 
-	if (malloc_mutex_init(&purge_lock))
-		return (true);
-
 	return (false);
 }