Fix and refactor runs_dirty-based purging.

Fix runs_dirty-based purging to also purge dirty pages in the spare
chunk.

Refactor runs_dirty manipulation into arena_dirty_{insert,remove}(), and
move the arena->ndirty accounting into those functions.
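
The arena.c hunks are not shown on this page. As a rough, self-contained
sketch of the pattern the message describes (not the actual jemalloc code),
the helpers below reuse the arena_dirty_{insert,remove}() names but with
illustrative stand-in types (dirty_run_t, arena_dirty_t); the real
signatures and list linkage are not visible in this diff:

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative stand-ins for arena_chunk_map_t and arena_t. */
    typedef struct dirty_run_s dirty_run_t;
    struct dirty_run_s {
        dirty_run_t *prev, *next; /* Intrusive dirty-list linkage. */
        size_t      npages;       /* Dirty pages covered by this run. */
    };

    typedef struct {
        dirty_run_t head;   /* List sentinel; points to itself when empty. */
        size_t      ndirty; /* Total dirty pages; updated only below. */
    } arena_dirty_t;

    static void
    arena_dirty_init(arena_dirty_t *a)
    {
        a->head.prev = a->head.next = &a->head;
        a->ndirty = 0;
    }

    static void
    arena_dirty_insert(arena_dirty_t *a, dirty_run_t *r)
    {
        /* Link at the tail; do the ndirty accounting in the same place. */
        r->prev = a->head.prev;
        r->next = &a->head;
        r->prev->next = r;
        a->head.prev = r;
        a->ndirty += r->npages;
    }

    static void
    arena_dirty_remove(arena_dirty_t *a, dirty_run_t *r)
    {
        assert(a->ndirty >= r->npages);
        r->prev->next = r->next;
        r->next->prev = r->prev;
        a->ndirty -= r->npages;
    }

Keeping the counter update next to the list manipulation is the point of the
refactor: callers can no longer touch runs_dirty without also updating
arena->ndirty.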

Remove the u.ql_link field from arena_chunk_map_t, and get rid of the
enclosing union for u.rb_link, since only rb_link remains.

Remove the ndirty field from arena_chunk_t.

Jason Evans committed 2014-08-14 14:45:58 -07:00
commit 070b3c3fbd
parent e8a2fd83a2
2 changed files with 91 additions and 127 deletions


@@ -65,23 +65,14 @@ struct arena_chunk_map_s {
 	 */
 	union {
 #endif
-	union {
-		/*
-		 * Linkage for run trees. There are two disjoint uses:
-		 *
-		 * 1) arena_t's runs_avail tree.
-		 * 2) arena_run_t conceptually uses this linkage for in-use
-		 *    non-full runs, rather than directly embedding linkage.
-		 */
-		rb_node(arena_chunk_map_t)	rb_link;
-		/*
-		 * List of runs currently in purgatory. arena_chunk_purge()
-		 * temporarily allocates runs that contain dirty pages while
-		 * purging, so that other threads cannot use the runs while the
-		 * purging thread is operating without the arena lock held.
-		 */
-		ql_elm(arena_chunk_map_t)	ql_link;
-	} u;
+	/*
+	 * Linkage for run trees. There are two disjoint uses:
+	 *
+	 * 1) arena_t's runs_avail tree.
+	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
+	 *    runs, rather than directly embedding linkage.
+	 */
+	rb_node(arena_chunk_map_t)	rb_link;
 
 	/* Profile counters, used for large object runs. */
 	prof_ctx_t			*prof_ctx;
@@ -167,9 +158,6 @@ struct arena_chunk_s {
 	/* Arena that owns the chunk. */
 	arena_t			*arena;
 
-	/* Number of dirty pages. */
-	size_t			ndirty;
-
 	/*
 	 * Map of pages within chunk that keeps track of free/large/small. The
 	 * first map_bias entries are omitted, since the chunk header does not
@@ -317,9 +305,6 @@ struct arena_s {
 	dss_prec_t		dss_prec;
 
-	/* List of dirty runs this arena manages. */
-	arena_chunk_mapelms_t	runs_dirty;
-
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -349,6 +334,9 @@ struct arena_s {
 	 */
 	arena_avail_tree_t	runs_avail;
 
+	/* List of dirty runs this arena manages. */
+	arena_chunk_mapelms_t	runs_dirty;
+
 	/*
 	 * user-configureable chunk allocation and deallocation functions.
 	 */