Modify dirty page purging algorithm.
Convert chunks_dirty from a red-black tree to a doubly linked list, and use it to purge dirty pages from chunks in FIFO order.

Add a lock around the code that purges dirty pages via madvise(2), in order to avoid kernel contention. If lock acquisition fails, indefinitely postpone purging dirty pages.

Add a lower limit of one chunk worth of dirty pages per arena for purging, in addition to the active:dirty ratio.

When purging, purge all dirty pages from at least one chunk, but rather than purging enough pages to drop to half the purging threshold, merely drop to the threshold.
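Taken together, the new policy looks roughly like the sketch below. This is illustrative only: arena_maybe_purge_sketch(), purge_lock, and arena_chunk_purge() are assumed names, while opt_lg_dirty_mult, chunk_npages, ql_first(), and malloc_mutex_trylock() are jemalloc interfaces referenced by this change.

/*
 * Illustrative sketch of the revised purging policy; not the literal
 * code from this commit.  purge_lock, arena_chunk_purge(), and the
 * function name are assumptions.
 */
static void
arena_maybe_purge_sketch(arena_t *arena)
{
	/* Threshold derived from the active:dirty ratio. */
	size_t threshold = arena->nactive >> opt_lg_dirty_mult;

	/* New lower limit: at least one chunk worth of dirty pages. */
	if (arena->ndirty <= chunk_npages || arena->ndirty <= threshold)
		return;

	/* Trylock; on failure, postpone purging rather than contend. */
	if (malloc_mutex_trylock(&purge_lock))
		return;

	/*
	 * Purge whole chunks in FIFO order (oldest-dirtied chunk first),
	 * at least one chunk, and stop once the dirty page count drops
	 * to the threshold rather than to half of it.
	 */
	do {
		arena_chunk_t *chunk = ql_first(&arena->chunks_dirty);
		if (chunk == NULL)
			break;
		/* Assumed helper: madvise(2) away the chunk's dirty
		 * pages and unlink it from chunks_dirty. */
		arena_chunk_purge(arena, chunk);
	} while (arena->ndirty > threshold);

	malloc_mutex_unlock(&purge_lock);
}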
@@ -178,11 +178,11 @@ struct arena_chunk_s {
 	/* Arena that owns the chunk. */
 	arena_t			*arena;
 
-	/* Linkage for the arena's chunks_dirty tree. */
-	rb_node(arena_chunk_t)	link_dirty;
+	/* Linkage for the arena's chunks_dirty list. */
+	ql_elm(arena_chunk_t)	link_dirty;
 
 	/*
-	 * True if the chunk is currently in the chunks_dirty tree, due to
+	 * True if the chunk is currently in the chunks_dirty list, due to
 	 * having at some point contained one or more dirty pages.  Removal
 	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
 	 */
@@ -287,8 +287,8 @@ struct arena_s {
 	uint64_t		prof_accumbytes;
 #endif
 
-	/* Tree of dirty-page-containing chunks this arena manages. */
-	arena_chunk_tree_t	chunks_dirty;
+	/* List of dirty-page-containing chunks this arena manages. */
+	ql_head(arena_chunk_t)	chunks_dirty;
 
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
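The two struct changes above work as a pair: each chunk carries an intrusive ql_elm() link, and the arena owns the ql_head(). A minimal sketch of the FIFO discipline, assuming the ql.h macros (ql_elm_new, ql_tail_insert, ql_first, ql_remove) behave as in jemalloc's ql.h; the function names are illustrative:

/* Sketch: lazily enroll a chunk that gained dirty pages.  Tail insertion
 * plus head removal is what makes purging FIFO (oldest-dirtied first). */
static void
chunk_dirtied_sketch(arena_t *arena, arena_chunk_t *chunk)
{
	if (chunk->dirtied == false) {
		ql_elm_new(chunk, link_dirty);	/* init intrusive linkage */
		ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
		chunk->dirtied = true;
	}
}

/* Sketch: purging consumes from the head of the list. */
static arena_chunk_t *
chunk_oldest_dirty_sketch(arena_t *arena)
{
	arena_chunk_t *chunk = ql_first(&arena->chunks_dirty);

	if (chunk != NULL)
		ql_remove(&arena->chunks_dirty, chunk, link_dirty);
	return (chunk);
}

Compared with red-black tree insertion and removal, these list operations are O(1) and need no comparison function, which is all the FIFO purging order requires.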
@@ -25,6 +25,7 @@ bool	malloc_mutex_init(malloc_mutex_t *mutex);
 
 #ifndef JEMALLOC_ENABLE_INLINE
 void	malloc_mutex_lock(malloc_mutex_t *mutex);
+bool	malloc_mutex_trylock(malloc_mutex_t *mutex);
 void	malloc_mutex_unlock(malloc_mutex_t *mutex);
 #endif
 
@@ -37,6 +38,16 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
 		pthread_mutex_lock(mutex);
 }
 
+JEMALLOC_INLINE bool
+malloc_mutex_trylock(malloc_mutex_t *mutex)
+{
+
+	if (isthreaded)
+		return (pthread_mutex_trylock(mutex) != 0);
+	else
+		return (false);
+}
+
 JEMALLOC_INLINE void
 malloc_mutex_unlock(malloc_mutex_t *mutex)
 {
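Note the convention: malloc_mutex_trylock() returns true on failure. When isthreaded is false it returns false, so a single-threaded process always "acquires" the lock and purging is never postponed. A caller-side sketch (purge_lock is an assumed name for the new lock around the madvise(2) path):

	if (malloc_mutex_trylock(&purge_lock)) {
		/* Another thread is already purging; indefinitely
		 * postpone rather than queueing up on kernel VM locks. */
		return;
	}
	/* ... purge dirty pages via madvise(2) ... */
	malloc_mutex_unlock(&purge_lock);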