Refactor dirty run linkage to reduce sizeof(extent_node_t).

Jason Evans 2015-03-10 18:15:40 -07:00
parent 54673fd8d7
commit 38e42d311c
4 changed files with 95 additions and 57 deletions
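
The refactor in a nutshell: extent_node_t previously embedded a whole arena_chunk_map_misc_t — including its rb_node linkage and its run/profiling union — just so cached chunks could be threaded onto the arena's runs_dirty ring. The new arena_runs_dirty_link_t carries only the ring linkage, and the new arena_rd_to_miscelm() recovers the enclosing map element when a ring element turns out to be a run. A minimal standalone sketch of the idea (illustrative names; this is not jemalloc's qr.h machinery):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* What qr(type) amounts to, conceptually: two ring pointers. */
typedef struct link_s link_t;
struct link_s {
	link_t	*next;
	link_t	*prev;
};

/* Stand-in for arena_chunk_map_misc_t: bulky state plus a tiny link. */
typedef struct miscelm_s {
	void	*rb_left;	/* rb_node linkage (stand-in). */
	void	*rb_right;
	link_t	rd;		/* The only part a dirty ring needs. */
	void	*run_state;	/* Union payload (stand-in). */
} miscelm_t;

/* Recover the container from its embedded link, as the commit does. */
static miscelm_t *
rd_to_miscelm(link_t *rd)
{
	return ((miscelm_t *)((uintptr_t)rd - offsetof(miscelm_t, rd)));
}

int
main(void)
{
	miscelm_t m;

	/* Embedding link_t instead of miscelm_t is what shrinks the node. */
	assert(sizeof(link_t) < sizeof(miscelm_t));
	assert(rd_to_miscelm(&m.rd) == &m);
	return (0);
}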

include/jemalloc/internal/arena.h

@@ -23,6 +23,7 @@
  */
 #define	LG_DIRTY_MULT_DEFAULT	3
 
+typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@@ -120,6 +121,10 @@ struct arena_chunk_map_bits_s {
 #define	CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
 };
 
+struct arena_runs_dirty_link_s {
+	qr(arena_runs_dirty_link_t)	rd_link;
+};
+
 /*
  * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
  * like arena_chunk_map_bits_t. Two separate arrays are stored within each
@@ -131,13 +136,13 @@ struct arena_chunk_map_misc_s {
 	 *
 	 * 1) arena_t's runs_avail tree.
 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
-	 * runs, rather than directly embedding linkage.
+	 *    runs, rather than directly embedding linkage.
 	 */
 	rb_node(arena_chunk_map_misc_t)	rb_link;
 
 	union {
 		/* Linkage for list of dirty runs. */
-		qr(arena_chunk_map_misc_t)	rd_link;
+		arena_runs_dirty_link_t		rd;
 
 		/* Profile counters, used for large object runs. */
 		prof_tctx_t			*prof_tctx;
@@ -324,15 +329,27 @@ struct arena_s {
 	 *
 	 * LRU-----------------------------------------------------------MRU
 	 *
-	 * ______________ ___ ___
-	 * ...-->|chunks_cache|<--------->|c|<-------------------->|c|<--...
-	 * -------------- |h| |h|
-	 * ____________ _____ |u| _____ _____ |u|
-	 * ...-->|runs_dirty|<-->|run|<-->|n|<-->|run|<-->|run|<-->|n|<--...
-	 * ------------ ----- |k| ----- ----- |k|
-	 * --- ---
+	 * /------------------\
+	 * | arena |
+	 * | |
+	 * | /------------\ | /-----------\
+	 * ...---->|chunks_cache|<---------------------->| chunk |<--...
+	 * | \------------/ | | |
+	 * | | | |
+	 * | | /---\ /---\ | |
+	 * | | |run| |run| | |
+	 * | | | | | | | |
+	 * | /----------\ | |---| |---| | /-----\ |
+	 * ...----->|runs_dirty|<---->|rd |<---->|rd |<---->|rdelm|<-----...
+	 * | \----------/ | |---| |---| | \-----/ |
+	 * | | | | | | | |
+	 * | | | | | | | |
+	 * | | \---/ \---/ | |
+	 * | | | |
+	 * | | | |
+	 * \------------------/ \-----------/
 	 */
-	arena_chunk_map_misc_t	runs_dirty;
+	arena_runs_dirty_link_t	runs_dirty;
 	extent_node_t		chunks_cache;
 
 	/* Extant huge allocations. */
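
As the revised diagram indicates, the dirty-page LRU is one ring that interleaves two element types: rd links embedded in each dirty run's arena_chunk_map_misc_t, and rdelm links embedded in each cached chunk's extent_node_t. Giving both the same arena_runs_dirty_link_t type is what lets one ring thread through both; cached chunks are additionally threaded onto the separate chunks_cache ring, kept in the same LRU order. A sketch of the dual membership (illustrative types, not the real declarations):

/* Shared ring link type, as arena_runs_dirty_link_t is in the commit. */
typedef struct rd_link_s {
	struct rd_link_s	*next;
	struct rd_link_s	*prev;
} rd_link_t;

/* A dirty run participates in the runs_dirty ring only. */
typedef struct run_misc_s {
	rd_link_t	rd;
} run_misc_t;

/* A cached chunk participates in both rings. */
typedef struct chunk_node_s {
	rd_link_t		rdelm;		/* runs_dirty ring. */
	struct chunk_node_s	*cc_next;	/* chunks_cache ring. */
	struct chunk_node_s	*cc_prev;
} chunk_node_t;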
@@ -465,6 +482,7 @@ arena_chunk_map_misc_t	*arena_miscelm_get(arena_chunk_t *chunk,
     size_t pageind);
 size_t	arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
 void	*arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
 arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
 size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbitsp_read(size_t *mapbitsp);
@@ -556,6 +574,18 @@ arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
 	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
 }
 
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
+{
+	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
+
+	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+	return (miscelm);
+}
+
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
 arena_run_to_miscelm(arena_run_t *run)
 {
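
arena_rd_to_miscelm() is the classic container-of idiom: subtract the member's offset from the member's address to get back the enclosing structure. The bounds asserts double as a sanity check that the link really is embedded in a chunk's page map — a chunk node's rdelm lives in an extent_node_t, so it would presumably map to a page index outside [map_bias, chunk_npages). As a generic macro (a sketch; jemalloc spells the subtraction out inline rather than defining this):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper, shown for clarity only. */
#define CONTAINER_OF(ptr, type, member)					\
	((type *)((uintptr_t)(ptr) - offsetof(type, member)))

/*
 * arena_rd_to_miscelm(rd) is then essentially
 * CONTAINER_OF(rd, arena_chunk_map_misc_t, rd), plus the asserts.
 */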

include/jemalloc/internal/extent.h

@@ -34,7 +34,7 @@ struct extent_node_s {
 	prof_tctx_t		*en_prof_tctx;
 
 	/* Linkage for arena's runs_dirty and chunks_cache rings. */
-	arena_chunk_map_misc_t	runs_dirty;
+	arena_runs_dirty_link_t	rdelm;
 	qr(extent_node_t)	cc_link;
 
 	union {
@@ -79,7 +79,7 @@ void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
     size_t size, bool zeroed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
-    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty);
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
 void	extent_node_dirty_remove(extent_node_t *node);
 
 #endif
@@ -186,16 +186,16 @@ JEMALLOC_INLINE void
 extent_node_dirty_linkage_init(extent_node_t *node)
 {
 
-	qr_new(&node->runs_dirty, rd_link);
+	qr_new(&node->rdelm, rd_link);
 	qr_new(node, cc_link);
 }
 
 JEMALLOC_INLINE void
 extent_node_dirty_insert(extent_node_t *node,
-    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty)
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
 {
 
-	qr_meld(runs_dirty, &node->runs_dirty, rd_link);
+	qr_meld(runs_dirty, &node->rdelm, rd_link);
 	qr_meld(chunks_dirty, node, cc_link);
 }
@@ -203,7 +203,7 @@ JEMALLOC_INLINE void
 extent_node_dirty_remove(extent_node_t *node)
 {
 
-	qr_remove(&node->runs_dirty, rd_link);
+	qr_remove(&node->rdelm, rd_link);
 	qr_remove(node, cc_link);
 }
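
The three extent_node_dirty_*() helpers above are thin wrappers over jemalloc's qr ring macros. A simplified model of the operations they use (a conceptual sketch; the real, field-name-parameterized macros live in include/jemalloc/internal/qr.h):

typedef struct ring_s ring_t;
struct ring_s {
	ring_t	*next;
	ring_t	*prev;
};

/* qr_new(): an element starts as a singleton ring. */
static void
ring_new(ring_t *r)
{
	r->next = r->prev = r;
}

/* qr_meld(): splice two rings into one at the given elements. */
static void
ring_meld(ring_t *a, ring_t *b)
{
	ring_t *a_prev = a->prev, *b_prev = b->prev;

	a_prev->next = b;
	b_prev->next = a;
	a->prev = b_prev;
	b->prev = a_prev;
}

/* qr_remove(): unlink an element, leaving it a singleton again. */
static void
ring_remove(ring_t *r)
{
	r->prev->next = r->next;
	r->next->prev = r->prev;
	ring_new(r);
}

Under this model, extent_node_dirty_insert() splices the node's singleton rdelm into the arena-wide runs_dirty ring and the node itself into the chunks_cache ring; extent_node_dirty_remove() undoes both.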

include/jemalloc/internal/private_symbols.txt

@@ -81,6 +81,7 @@ arena_quarantine_junk_small
 arena_ralloc
 arena_ralloc_junk_large
 arena_ralloc_no_move
+arena_rd_to_miscelm
 arena_redzone_corruption
 arena_run_regind
 arena_run_to_miscelm

src/arena.c

@@ -136,8 +136,8 @@ arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
 	    CHUNK_MAP_DIRTY);
 
-	qr_new(miscelm, rd_link);
-	qr_meld(&arena->runs_dirty, miscelm, rd_link);
+	qr_new(&miscelm->rd, rd_link);
+	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
 	arena->ndirty += npages;
 }
@@ -153,7 +153,7 @@ arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
 	    CHUNK_MAP_DIRTY);
 
-	qr_remove(miscelm, rd_link);
+	qr_remove(&miscelm->rd, rd_link);
 	assert(arena->ndirty >= npages);
 	arena->ndirty -= npages;
 }
@@ -1056,22 +1056,23 @@ static size_t
 arena_dirty_count(arena_t *arena)
 {
 	size_t ndirty = 0;
-	arena_chunk_map_misc_t *runselm;
+	arena_runs_dirty_link_t *rdelm;
 	extent_node_t *chunkselm;
 
-	for (runselm = qr_next(&arena->runs_dirty, rd_link),
+	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
-	    runselm != &arena->runs_dirty; runselm = qr_next(runselm,
-	    rd_link)) {
+	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
 		size_t npages;
 
-		if (runselm == &chunkselm->runs_dirty) {
+		if (rdelm == &chunkselm->rdelm) {
 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
 			chunkselm = qr_next(chunkselm, cc_link);
 		} else {
-			arena_chunk_t *chunk = (arena_chunk_t
-			    *)CHUNK_ADDR2BASE(runselm);
-			size_t pageind = arena_miscelm_to_pageind(runselm);
+			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+			    rdelm);
+			arena_chunk_map_misc_t *miscelm =
+			    arena_rd_to_miscelm(rdelm);
+			size_t pageind = arena_miscelm_to_pageind(miscelm);
 			assert(arena_mapbits_allocated_get(chunk, pageind) ==
 			    0);
 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
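
arena_dirty_count() shows the traversal convention for the interleaved ring: walk runs_dirty while keeping a cursor into chunks_cache, and classify each element by pointer identity — it is a cached chunk exactly when it is the cursor's embedded rdelm; otherwise it is a run's link, and arena_rd_to_miscelm() recovers the map element. (CHUNK_ADDR2BASE() still works on the raw rdelm pointer because a run's link is embedded in the chunk's own page map.) A self-contained sketch of the same convention (illustrative types; a forward-only ring for brevity, where the real qr rings are doubly linked):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CONTAINER_OF(ptr, type, member)					\
	((type *)((uintptr_t)(ptr) - offsetof(type, member)))

typedef struct rd_s { struct rd_s *next; } rd_t;

typedef struct run_misc_s {
	rd_t	rd;
	size_t	npages;
} run_misc_t;

typedef struct chunk_node_s {
	rd_t			rdelm;
	struct chunk_node_s	*cc_next;
	size_t			npages;
} chunk_node_t;

typedef struct arena_like_s {
	rd_t		runs_dirty;	/* Ring sentinel. */
	chunk_node_t	chunks_cache;	/* Ring sentinel. */
} arena_like_t;

static size_t
dirty_count(arena_like_t *arena)
{
	size_t ndirty = 0;
	chunk_node_t *chunkselm = arena->chunks_cache.cc_next;
	rd_t *rdelm;

	for (rdelm = arena->runs_dirty.next; rdelm != &arena->runs_dirty;
	    rdelm = rdelm->next) {
		if (rdelm == &chunkselm->rdelm) {
			/* Cached chunk: advance the chunks_cache cursor. */
			ndirty += chunkselm->npages;
			chunkselm = chunkselm->cc_next;
		} else {
			/* Dirty run: recover its containing element. */
			ndirty += CONTAINER_OF(rdelm, run_misc_t, rd)->npages;
		}
	}
	return (ndirty);
}

int
main(void)
{
	arena_like_t a;
	run_misc_t r = {{NULL}, 3};
	chunk_node_t c = {{NULL}, NULL, 8};

	/* LRU order: run r, then cached chunk c. */
	a.runs_dirty.next = &r.rd;
	r.rd.next = &c.rdelm;
	c.rdelm.next = &a.runs_dirty;
	a.chunks_cache.cc_next = &c;
	c.cc_next = &a.chunks_cache;

	assert(dirty_count(&a) == 11);
	return (0);
}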
@@ -1107,21 +1108,21 @@ arena_compute_npurge(arena_t *arena, bool all)
 static size_t
 arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
-    arena_chunk_map_misc_t *purge_runs_sentinel,
+    arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
-	arena_chunk_map_misc_t *runselm, *runselm_next;
+	arena_runs_dirty_link_t *rdelm, *rdelm_next;
 	extent_node_t *chunkselm;
 	size_t nstashed = 0;
 
 	/* Stash at least npurge pages. */
-	for (runselm = qr_next(&arena->runs_dirty, rd_link),
+	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
-	    runselm != &arena->runs_dirty; runselm = runselm_next) {
+	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
 		size_t npages;
-		runselm_next = qr_next(runselm, rd_link);
+		rdelm_next = qr_next(rdelm, rd_link);
 
-		if (runselm == &chunkselm->runs_dirty) {
+		if (rdelm == &chunkselm->rdelm) {
 			extent_node_t *chunkselm_next;
 			bool zero;
 			UNUSED void *chunk;
@@ -1144,9 +1145,11 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 			chunkselm = chunkselm_next;
 		} else {
 			arena_chunk_t *chunk =
-			    (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
-			size_t pageind = arena_miscelm_to_pageind(runselm);
-			arena_run_t *run = &runselm->run;
+			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+			arena_chunk_map_misc_t *miscelm =
+			    arena_rd_to_miscelm(rdelm);
+			size_t pageind = arena_miscelm_to_pageind(miscelm);
+			arena_run_t *run = &miscelm->run;
 			size_t run_size =
 			    arena_mapbits_unallocated_size_get(chunk, pageind);
@@ -1167,12 +1170,12 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 			arena_run_split_large(arena, run, run_size, false);
 			/* Stash. */
 			if (false)
-				qr_new(runselm, rd_link); /* Redundant. */
+				qr_new(rdelm, rd_link); /* Redundant. */
 			else {
-				assert(qr_next(runselm, rd_link) == runselm);
-				assert(qr_prev(runselm, rd_link) == runselm);
+				assert(qr_next(rdelm, rd_link) == rdelm);
+				assert(qr_prev(rdelm, rd_link) == rdelm);
 			}
-			qr_meld(purge_runs_sentinel, runselm, rd_link);
+			qr_meld(purge_runs_sentinel, rdelm, rd_link);
 		}
 
 		nstashed += npages;
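
Two details are worth noting in the stash loop above. First, purge_runs_sentinel is now just a stack-allocated arena_runs_dirty_link_t (see arena_purge() below), another payoff of the slimmer link type. Second, the odd `if (false) qr_new(...)` / `else assert(...)` construct documents an invariant: by this point the element is already a singleton ring (presumably unlinked via arena_run_dirty_remove() during arena_run_split_large()), so re-initializing it before the meld would be redundant. A sketch of the stash step under that invariant (same conceptual ring helpers as sketched earlier, repeated here so the block stands alone):

#include <assert.h>

typedef struct ring_s { struct ring_s *next, *prev; } ring_t;

static void
ring_new(ring_t *r)
{
	r->next = r->prev = r;
}

static void
ring_meld(ring_t *a, ring_t *b)
{
	ring_t *a_prev = a->prev, *b_prev = b->prev;

	a_prev->next = b;
	b_prev->next = a;
	a->prev = b_prev;
	b->prev = a_prev;
}

/* Stash: the caller guarantees elm is a singleton, so melding suffices. */
static void
stash(ring_t *sentinel, ring_t *elm)
{
	assert(elm->next == elm && elm->prev == elm);
	ring_meld(sentinel, elm);
}

int
main(void)
{
	ring_t sentinel, e1, e2;

	ring_new(&sentinel);
	ring_new(&e1);
	ring_new(&e2);
	stash(&sentinel, &e1);
	stash(&sentinel, &e2);
	/* Elements accumulate at the sentinel's prev (MRU) side. */
	assert(sentinel.next == &e1 && sentinel.prev == &e2);
	return (0);
}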
@@ -1184,11 +1187,12 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 }
 
 static size_t
-arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
+arena_purge_stashed(arena_t *arena,
+    arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
 	size_t npurged, nmadvise;
-	arena_chunk_map_misc_t *runselm;
+	arena_runs_dirty_link_t *rdelm;
 	extent_node_t *chunkselm;
 
 	if (config_stats)
@@ -1196,13 +1200,12 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
 	npurged = 0;
 	malloc_mutex_unlock(&arena->lock);
 
-	for (runselm = qr_next(purge_runs_sentinel, rd_link),
+	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
-	    runselm != purge_runs_sentinel; runselm = qr_next(runselm,
-	    rd_link)) {
+	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
 		size_t npages;
 
-		if (runselm == &chunkselm->runs_dirty) {
+		if (rdelm == &chunkselm->rdelm) {
 			size_t size = extent_node_size_get(chunkselm);
 			bool unzeroed;
@@ -1216,8 +1219,10 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
 			size_t pageind, run_size, flag_unzeroed, i;
 			bool unzeroed;
 
-			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
-			pageind = arena_miscelm_to_pageind(runselm);
+			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+			arena_chunk_map_misc_t *miscelm =
+			    arena_rd_to_miscelm(rdelm);
+			pageind = arena_miscelm_to_pageind(miscelm);
 			run_size = arena_mapbits_large_size_get(chunk, pageind);
 			npages = run_size >> LG_PAGE;
@@ -1259,18 +1264,18 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
 static void
 arena_unstash_purged(arena_t *arena,
-    arena_chunk_map_misc_t *purge_runs_sentinel,
+    arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
-	arena_chunk_map_misc_t *runselm, *runselm_next;
+	arena_runs_dirty_link_t *rdelm, *rdelm_next;
 	extent_node_t *chunkselm;
 
 	/* Deallocate runs. */
-	for (runselm = qr_next(purge_runs_sentinel, rd_link),
+	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
-	    runselm != purge_runs_sentinel; runselm = runselm_next) {
-		runselm_next = qr_next(runselm, rd_link);
-		if (runselm == &chunkselm->runs_dirty) {
+	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+		rdelm_next = qr_next(rdelm, rd_link);
+		if (rdelm == &chunkselm->rdelm) {
 			extent_node_t *chunkselm_next = qr_next(chunkselm,
 			    cc_link);
 			void *addr = extent_node_addr_get(chunkselm);
@@ -1281,8 +1286,10 @@ arena_unstash_purged(arena_t *arena,
 			chunkselm = chunkselm_next;
 			chunk_dalloc_arena(arena, addr, size, zeroed);
 		} else {
-			arena_run_t *run = &runselm->run;
-			qr_remove(runselm, rd_link);
+			arena_chunk_map_misc_t *miscelm =
+			    arena_rd_to_miscelm(rdelm);
+			arena_run_t *run = &miscelm->run;
+			qr_remove(rdelm, rd_link);
 			arena_run_dalloc(arena, run, false, true);
 		}
 	}
@@ -1292,7 +1299,7 @@ void
 arena_purge(arena_t *arena, bool all)
 {
 	size_t npurge, npurgeable, npurged;
-	arena_chunk_map_misc_t purge_runs_sentinel;
+	arena_runs_dirty_link_t purge_runs_sentinel;
 	extent_node_t purge_chunks_sentinel;
 
 	/*