Maintain all the dirty runs in a linked list for each arena

Qinfan Wu 2014-07-18 14:21:17 -07:00
parent dd03242da9
commit 04d60a132b
2 changed files with 53 additions and 0 deletions

@@ -89,6 +89,9 @@ struct arena_chunk_map_s {
}; /* union { ... }; */
#endif
/* Linkage for list of dirty runs. */
ql_elm(arena_chunk_map_t) dr_link;
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
@@ -333,6 +336,9 @@ struct arena_s {
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t chunks_dirty;
/* List of dirty runs this arena manages. */
arena_chunk_mapelms_t runs_dirty;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
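
Taken together, these two header additions give each arena an intrusive doubly-linked list of dirty runs: every arena_chunk_map_t carries its own dr_link linkage, and the arena stores the list head in runs_dirty, so runs can be linked and unlinked in constant time without allocating separate list nodes. The following is a minimal, self-contained C sketch of that shape; it uses plain prev/next pointers and hypothetical dirty_list_* helpers rather than jemalloc's actual ql_* macros (which are built on circular qr_* rings), so it illustrates the pattern, not the real layout.

#include <stddef.h>

/* Stand-in for arena_chunk_map_t: the linkage is embedded in the
 * element itself (an intrusive list), mirroring ql_elm(...) dr_link. */
typedef struct map_elm_s map_elm_t;
struct map_elm_s {
	size_t		run_ind;	/* first page of the run this element heads */
	map_elm_t	*dr_prev;
	map_elm_t	*dr_next;
};

/* Stand-in for the runs_dirty head added to arena_s. */
typedef struct {
	map_elm_t	*head;
	map_elm_t	*tail;
} dirty_list_t;

/* Mirrors ql_new(). */
void
dirty_list_new(dirty_list_t *list)
{
	list->head = NULL;
	list->tail = NULL;
}

/* Mirrors ql_elm_new() followed by ql_tail_insert(). */
void
dirty_list_tail_insert(dirty_list_t *list, map_elm_t *elm)
{
	elm->dr_next = NULL;
	elm->dr_prev = list->tail;
	if (list->tail != NULL)
		list->tail->dr_next = elm;
	else
		list->head = elm;
	list->tail = elm;
}

/* Mirrors ql_remove(): constant time, no search, because the links
 * live inside the element being removed. */
void
dirty_list_remove(dirty_list_t *list, map_elm_t *elm)
{
	if (elm->dr_prev != NULL)
		elm->dr_prev->dr_next = elm->dr_next;
	else
		list->head = elm->dr_next;
	if (elm->dr_next != NULL)
		elm->dr_next->dr_prev = elm->dr_prev;
	else
		list->tail = elm->dr_prev;
}

The arena.c hunks below keep this list consistent with the existing runs_avail/chunks_dirty bookkeeping: the head map element of a dirty run is inserted wherever a dirty run comes into existence (run deallocation, reuse of a chunk whose pages are still dirty) and removed wherever one disappears (run splitting, coalescing, chunk deallocation).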

@@ -394,6 +394,7 @@ static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
size_t flag_dirty, size_t need_pages)
{
arena_chunk_map_t *mapelm;
size_t total_pages, rem_pages;
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
@@ -404,6 +405,11 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
rem_pages = total_pages - need_pages;
arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
if (flag_dirty != 0) {
/* If the run is dirty, it must be in the dirty list. */
mapelm = arena_mapp_get(chunk, run_ind);
ql_remove(&arena->runs_dirty, mapelm, dr_link);
}
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
@@ -416,6 +422,14 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
flag_dirty);
mapelm = arena_mapp_get(chunk, run_ind+need_pages);
/*
* Append the trailing run to the end of the dirty list.
* We could also insert the run at its original position;
* consider doing so later.
*/
ql_elm_new(mapelm, dr_link);
ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
} else {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE),
@@ -701,6 +715,11 @@ arena_chunk_alloc(arena_t *arena)
/* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
false, false);
if (arena_mapbits_dirty_get(chunk, map_bias) != 0) {
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, map_bias);
ql_elm_new(mapelm, dr_link);
ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
}
return (chunk);
}
@@ -739,6 +758,7 @@ arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -754,6 +774,10 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
*/
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
false, false);
if (arena_mapbits_dirty_get(chunk, map_bias) != 0) {
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, map_bias);
ql_remove(&arena->runs_dirty, mapelm, dr_link);
}
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
@@ -1216,6 +1240,13 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
false, true);
/* If the successor is dirty, remove it from runs_dirty. */
if (flag_dirty != 0) {
arena_chunk_map_t *mapelm = arena_mapp_get(chunk,
run_ind+run_pages);
ql_remove(&arena->runs_dirty, mapelm, dr_link);
}
size += nrun_size;
run_pages += nrun_pages;
@@ -1244,6 +1275,13 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
false);
/* If the predecessor is dirty, remove it from runs_dirty. */
if (flag_dirty != 0) {
arena_chunk_map_t *mapelm = arena_mapp_get(chunk,
run_ind);
ql_remove(&arena->runs_dirty, mapelm, dr_link);
}
size += prun_size;
run_pages += prun_pages;
@@ -1261,6 +1299,7 @@ static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
arena_chunk_map_t *mapelm;
size_t size, run_ind, run_pages, flag_dirty;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -1315,6 +1354,13 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
if (dirty) {
/* Insert into runs_dirty list. */
mapelm = arena_mapp_get(chunk, run_ind);
ql_elm_new(mapelm, dr_link);
ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
}
/* Deallocate chunk if it is now completely unused. */
if (size == arena_maxclass) {
assert(run_ind == map_bias);
@@ -2437,6 +2483,7 @@ arena_new(arena_t *arena, unsigned ind)
/* Initialize chunks. */
arena_chunk_dirty_new(&arena->chunks_dirty);
ql_new(&arena->runs_dirty);
arena->spare = NULL;
arena->nactive = 0;
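
Nothing in this commit consumes the new list yet; the apparent payoff is that a later purge pass can walk the arena's dirty runs directly, in insertion order, instead of re-scanning the chunks_dirty tree page by page. That consumer is not part of this diff, so the loop below is only a guess at its shape, written against a stripped-down, hypothetical stand-in for the list rather than against jemalloc's ql_foreach():

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a dirty run on the per-arena list. */
typedef struct dirty_run_s dirty_run_t;
struct dirty_run_s {
	size_t		run_ind;	/* first page of the dirty run */
	size_t		npages;		/* length of the run in pages */
	dirty_run_t	*dr_next;	/* forward link, as dr_link provides */
};

/* Visit every dirty run exactly once, front to back, the way a purge
 * pass might; returns the number of pages it would purge. */
size_t
purge_walk(const dirty_run_t *runs_dirty_head)
{
	size_t npurged = 0;
	for (const dirty_run_t *r = runs_dirty_head; r != NULL; r = r->dr_next) {
		printf("purge pages [%zu, %zu)\n", r->run_ind,
		    r->run_ind + r->npages);
		npurged += r->npages;
	}
	return npurged;
}

int
main(void)
{
	/* Two dirty runs chained the way ql_tail_insert() chains them. */
	dirty_run_t b = { .run_ind = 37, .npages = 4, .dr_next = NULL };
	dirty_run_t a = { .run_ind = 13, .npages = 2, .dr_next = &b };

	printf("purged %zu pages\n", purge_walk(&a));
	return 0;
}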