Rename "dirty chunks" to "cached chunks".

Rename "dirty chunks" to "cached chunks", in order to avoid overloading
the term "dirty".

Fix the regression caused by 339c2b23b2
(Fix chunk_unmap() to propagate dirty state.), and actually address what
that change attempted: purge chunks only once, and propagate to
chunk_record() whether the pages ended up zeroed.
Jason Evans 2015-02-18 01:15:50 -08:00
parent 339c2b23b2
commit 738e089a2e
7 changed files with 91 additions and 86 deletions
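
The behavior being fixed is double purging. After 339c2b23b2, arena_unstash_purged() told chunk_unmap() only whether a chunk was "dirty", so a chunk whose pages had just been purged (and observed zeroed) could be purged a second time inside chunk_record(). Below is a minimal standalone model of the old and new flows; the names (pages_purge_model, record_old, record_new) are invented for illustration and are not jemalloc's code:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int purge_calls; /* counts madvise()-style purge operations */

/* Stand-in for pages_purge(): returns "unzeroed" (false: pages now zero). */
static bool pages_purge_model(void) { purge_calls++; return false; }

/* Old chunk_record(): purged again whenever dirty was false. */
static void record_old(bool dirty) {
    bool unzeroed = dirty ? true : pages_purge_model();
    (void)unzeroed;
}

/* New chunk_record(): trusts the zeroed state the caller already knows. */
static void record_new(bool cache, bool zeroed) {
    assert(!cache || !zeroed); /* cached chunks are never claimed zeroed */
    bool unzeroed = cache || !zeroed;
    (void)unzeroed;
}

int main(void) {
    /* The purge loop purges the chunk once and learns it is zeroed. */
    purge_calls = 0;
    bool zeroed = !pages_purge_model();
    record_old(!zeroed);                  /* old flow: purges again */
    printf("old flow: %d purge calls\n", purge_calls); /* prints 2 */

    purge_calls = 0;
    zeroed = !pages_purge_model();
    record_new(false, zeroed);            /* new flow: purges once */
    printf("new flow: %d purge calls\n", purge_calls); /* prints 1 */
    return 0;
}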

include/jemalloc/internal/arena.h

@@ -318,14 +318,14 @@ struct arena_s {
     /*
      * Unused dirty memory this arena manages. Dirty memory is conceptually
-     * tracked as an arbitrarily interleaved LRU of runs and chunks, but the
-     * list linkage is actually semi-duplicated in order to avoid extra
-     * arena_chunk_map_misc_t space overhead.
+     * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+     * chunks, but the list linkage is actually semi-duplicated in order to
+     * avoid extra arena_chunk_map_misc_t space overhead.
      *
      *   LRU-----------------------------------------------------------MRU
      *
      *        ______________             ___                      ___
-     *   ...-->|chunks_dirty|<--------->|c|<-------------------->|c|<--...
+     *   ...-->|chunks_cache|<--------->|c|<-------------------->|c|<--...
      *       --------------              |h|                      |h|
      *        ____________    _____      |u|          _____       |u|
      *   ...-->|runs_dirty|<-->|run|<-->|n|<-->|run|<-->|run|<-->|n|<--...
@@ -333,7 +333,7 @@ struct arena_s {
      *                                 ---                      ---
      */
     arena_chunk_map_misc_t runs_dirty;
-    extent_node_t chunks_dirty;
+    extent_node_t chunks_cache;

     /* Extant huge allocations. */
     ql_head(extent_node_t) huge;
@@ -347,8 +347,8 @@ struct arena_s {
      * orderings are needed, which is why there are two trees with the same
      * contents.
      */
-    extent_tree_t chunks_szad_dirty;
-    extent_tree_t chunks_ad_dirty;
+    extent_tree_t chunks_szad_cache;
+    extent_tree_t chunks_ad_cache;
     extent_tree_t chunks_szad_mmap;
     extent_tree_t chunks_ad_mmap;
     extent_tree_t chunks_szad_dss;
@@ -384,10 +384,10 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 extern unsigned nlclasses; /* Number of large size classes. */
 extern unsigned nhclasses; /* Number of huge size classes. */
-void arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node,
-    bool dirty);
-void arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node,
-    bool dirty);
+void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+    bool cache);
+void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+    bool cache);
 extent_node_t *arena_node_alloc(arena_t *arena);
 void arena_node_dalloc(arena_t *arena, extent_node_t *node);
 void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
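
The struct comment above notes that two orderings are needed, which is why each chunk set (cache, mmap, dss) lives in both a szad tree and an ad tree: recycling wants a best-fit search keyed on (size, address), while record/coalescing wants address-ordered neighbor lookup. A hedged toy of the two comparators over the same extents, with arrays plus qsort standing in for the red-black trees:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy extent: jemalloc keeps each cached extent in two trees at once. */
typedef struct { uintptr_t addr; size_t size; } extent;

/* szad order: size first, then address -- lets chunk_recycle() take the
 * lowest-addressed extent among the smallest that still fit (best fit). */
static int cmp_szad(const void *a, const void *b) {
    const extent *x = a, *y = b;
    if (x->size != y->size) return x->size < y->size ? -1 : 1;
    return (x->addr > y->addr) - (x->addr < y->addr);
}

/* ad order: address only -- lets chunk_record() find the extents
 * immediately before/after a freed range so it can coalesce with them. */
static int cmp_ad(const void *a, const void *b) {
    const extent *x = a, *y = b;
    return (x->addr > y->addr) - (x->addr < y->addr);
}

int main(void) {
    extent v[] = {{0x300000, 2}, {0x100000, 4}, {0x200000, 2}};
    size_t n = sizeof(v) / sizeof(v[0]);

    qsort(v, n, sizeof(extent), cmp_szad);
    printf("szad: best-fit candidate addr=%#lx size=%zu\n",
        (unsigned long)v[0].addr, v[0].size);

    qsort(v, n, sizeof(extent), cmp_ad);
    printf("ad:   lowest address is %#lx\n", (unsigned long)v[0].addr);
    return 0;
}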

include/jemalloc/internal/chunk.h

@@ -45,9 +45,10 @@ void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
 void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
 void chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
+    bool zeroed);
 bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size);
+void chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed);
 bool chunk_boot(void);
 void chunk_prefork(void);
 void chunk_postfork_parent(void);

include/jemalloc/internal/extent.h

@@ -33,9 +33,9 @@ struct extent_node_s {
     /* Profile counters, used for huge objects. */
     prof_tctx_t *en_prof_tctx;

-    /* Linkage for arena's runs_dirty and chunks_dirty rings. */
-    qr(extent_node_t) cd_link;
+    /* Linkage for arena's runs_dirty and chunks_cache rings. */
     arena_chunk_map_misc_t runs_dirty;
+    qr(extent_node_t) cc_link;

     union {
         /* Linkage for the size/address-ordered tree. */
@@ -78,6 +78,9 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
     size_t size, bool zeroed);
 void extent_node_dirty_linkage_init(extent_node_t *node);
+void extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty);
+void extent_node_dirty_remove(extent_node_t *node);
 #endif
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -183,9 +186,27 @@ JEMALLOC_INLINE void
 extent_node_dirty_linkage_init(extent_node_t *node)
 {
-    qr_new(node, cd_link);
     qr_new(&node->runs_dirty, rd_link);
+    qr_new(node, cc_link);
 }
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+    arena_chunk_map_misc_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+    qr_meld(runs_dirty, &node->runs_dirty, rd_link);
+    qr_meld(chunks_dirty, node, cc_link);
+}
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+    qr_remove(&node->runs_dirty, rd_link);
+    qr_remove(node, cc_link);
+}
 #endif
 #endif /* JEMALLOC_H_INLINES */
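
The new inlines above are the whole trick behind the interleaved LRU: a cached chunk's node carries a runs_dirty stub that is linked into the runs ring, while the node itself is linked into the chunks ring via cc_link, so one walk of the runs ring visits both kinds of element in LRU order. A self-contained model, with plain pointer rings standing in for jemalloc's qr macros (names are illustrative):

#include <stdio.h>

/* Minimal circular ring mirroring jemalloc's qr-style linkage. */
typedef struct ring { struct ring *prev, *next; } ring;
static void ring_new(ring *r) { r->prev = r->next = r; }
static void ring_insert_before(ring *at, ring *r) {
    r->prev = at->prev; r->next = at; at->prev->next = r; at->prev = r;
}
static void ring_remove(ring *r) {
    r->prev->next = r->next; r->next->prev = r->prev; ring_new(r);
}

/* A cached chunk carries two linkages: a stub in the runs ring (so runs
 * and chunks interleave in one LRU) and its own link in the chunks ring. */
typedef struct { ring runs_dirty; ring cc_link; } chunk_node;

int main(void) {
    ring runs_sentinel, chunks_sentinel, run_a, run_b;
    chunk_node c;

    ring_new(&runs_sentinel); ring_new(&chunks_sentinel);
    ring_new(&run_a); ring_new(&run_b);
    ring_new(&c.runs_dirty); ring_new(&c.cc_link);

    /* LRU order: run_a, chunk c, run_b. */
    ring_insert_before(&runs_sentinel, &run_a);
    ring_insert_before(&runs_sentinel, &c.runs_dirty); /* chunk's stub */
    ring_insert_before(&runs_sentinel, &run_b);
    ring_insert_before(&chunks_sentinel, &c.cc_link);

    /* Walk the runs ring; the chunk is spotted by its embedded stub,
     * just like the runselm == &chunkselm->runs_dirty test in arena.c. */
    for (ring *r = runs_sentinel.next; r != &runs_sentinel; r = r->next)
        printf(r == &c.runs_dirty ? "whole chunk\n" : "dirty run\n");

    ring_remove(&c.runs_dirty); /* extent_node_dirty_remove() analogue */
    ring_remove(&c.cc_link);
    return 0;
}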

include/jemalloc/internal/private_symbols.txt

@@ -12,9 +12,9 @@ arena_boot
 arena_choose
 arena_choose_hard
 arena_chunk_alloc_huge
+arena_chunk_cache_maybe_insert
+arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
-arena_chunk_dirty_maybe_insert
-arena_chunk_dirty_maybe_remove
 arena_chunk_ralloc_huge_expand
 arena_chunk_ralloc_huge_shrink
 arena_chunk_ralloc_huge_similar
@@ -182,7 +182,9 @@ extent_node_addr_get
 extent_node_addr_set
 extent_node_arena_get
 extent_node_arena_set
+extent_node_dirty_insert
 extent_node_dirty_linkage_init
+extent_node_dirty_remove
 extent_node_init
 extent_node_prof_tctx_get
 extent_node_prof_tctx_set

src/arena.c

@@ -152,41 +152,24 @@ arena_chunk_dirty_npages(const extent_node_t *node)
     return (extent_node_size_get(node) >> LG_PAGE);
 }
-static void
-arena_chunk_dirty_insert(arena_chunk_map_misc_t *runs_dirty,
-    extent_node_t *chunks_dirty, extent_node_t *node)
-{
-    qr_meld(chunks_dirty, node, cd_link);
-    qr_meld(runs_dirty, &node->runs_dirty, rd_link);
-}
-static void
-arena_chunk_dirty_remove(extent_node_t *node)
-{
-    qr_remove(node, cd_link);
-    qr_remove(&node->runs_dirty, rd_link);
-}
 void
-arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node, bool dirty)
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
 {
-    if (dirty) {
+    if (cache) {
         extent_node_dirty_linkage_init(node);
-        arena_chunk_dirty_insert(&arena->runs_dirty,
-            &arena->chunks_dirty, node);
+        extent_node_dirty_insert(node, &arena->runs_dirty,
+            &arena->chunks_cache);
         arena->ndirty += arena_chunk_dirty_npages(node);
     }
 }
 void
-arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
 {
     if (dirty) {
-        arena_chunk_dirty_remove(node);
+        extent_node_dirty_remove(node);
         assert(arena->ndirty >= arena_chunk_dirty_npages(node));
         arena->ndirty -= arena_chunk_dirty_npages(node);
     }
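
Both helpers above account for a whole cached chunk in arena->ndirty, which counts pages, via arena_chunk_dirty_npages()'s size >> LG_PAGE conversion. A small worked example, assuming 4 KiB pages (LG_PAGE == 12):

#include <assert.h>
#include <stddef.h>

#define LG_PAGE 12 /* assume 4 KiB pages for this example */

int main(void) {
    size_t ndirty = 0;
    size_t chunk_size = 2 << 20;        /* a 2 MiB cached chunk */

    ndirty += chunk_size >> LG_PAGE;    /* ..._maybe_insert analogue */
    assert(ndirty == 512);              /* 2 MiB / 4 KiB pages */

    assert(ndirty >= (chunk_size >> LG_PAGE));
    ndirty -= chunk_size >> LG_PAGE;    /* ..._maybe_remove analogue */
    assert(ndirty == 0);
    return 0;
}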
@@ -954,14 +937,14 @@ arena_dirty_count(arena_t *arena)
     extent_node_t *chunkselm;
     for (runselm = qr_next(&arena->runs_dirty, rd_link),
-        chunkselm = qr_next(&arena->chunks_dirty, cd_link);
+        chunkselm = qr_next(&arena->chunks_cache, cc_link);
         runselm != &arena->runs_dirty; runselm = qr_next(runselm,
         rd_link)) {
         size_t npages;
         if (runselm == &chunkselm->runs_dirty) {
             npages = extent_node_size_get(chunkselm) >> LG_PAGE;
-            chunkselm = qr_next(chunkselm, cd_link);
+            chunkselm = qr_next(chunkselm, cc_link);
         } else {
             arena_chunk_t *chunk = (arena_chunk_t
                 *)CHUNK_ADDR2BASE(runselm);
@@ -1010,7 +993,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
     /* Stash at least npurge pages. */
     for (runselm = qr_next(&arena->runs_dirty, rd_link),
-        chunkselm = qr_next(&arena->chunks_dirty, cd_link);
+        chunkselm = qr_next(&arena->chunks_cache, cc_link);
         runselm != &arena->runs_dirty; runselm = runselm_next) {
         size_t npages;
         runselm_next = qr_next(runselm, rd_link);
@@ -1022,7 +1005,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
             bool zeroed, zero;
             UNUSED void *chunk;
-            chunkselm_next = qr_next(chunkselm, cd_link);
+            chunkselm_next = qr_next(chunkselm, cc_link);
             /*
              * Cache contents of chunkselm prior to it being
              * destroyed as a side effect of allocating the chunk.
@@ -1038,19 +1021,16 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
             assert(zero == zeroed);
             /*
              * Create a temporary node to link into the ring of
-             * stashed allocations.
+             * stashed allocations. OOM shouldn't be possible
+             * because chunk allocation just cached a node.
              */
             tnode = arena_node_alloc(arena);
-            /*
-             * OOM shouldn't be possible because chunk allocation
-             * just cached a node.
-             */
             assert(tnode != NULL);
+            /* Stash. */
             extent_node_init(tnode, arena, addr, size, zeroed);
             extent_node_dirty_linkage_init(tnode);
-            /* Stash. */
-            arena_chunk_dirty_insert(purge_runs_sentinel,
-                purge_chunks_sentinel, tnode);
+            extent_node_dirty_insert(tnode, purge_runs_sentinel,
+                purge_chunks_sentinel);
             npages = size >> LG_PAGE;
             chunkselm = chunkselm_next;
         } else {
@@ -1108,7 +1088,7 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
     malloc_mutex_unlock(&arena->lock);
     for (runselm = qr_next(purge_runs_sentinel, rd_link),
-        chunkselm = qr_next(purge_chunks_sentinel, cd_link);
+        chunkselm = qr_next(purge_chunks_sentinel, cc_link);
         runselm != purge_runs_sentinel; runselm = qr_next(runselm,
         rd_link)) {
         size_t npages;
@@ -1121,7 +1101,7 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
             unzeroed = pages_purge(extent_node_addr_get(chunkselm),
                 size);
             extent_node_zeroed_set(chunkselm, !unzeroed);
-            chunkselm = qr_next(chunkselm, cd_link);
+            chunkselm = qr_next(chunkselm, cc_link);
         } else {
             arena_chunk_t *chunk;
             size_t pageind, run_size, flag_unzeroed, i;
@@ -1178,19 +1158,19 @@ arena_unstash_purged(arena_t *arena,
     /* Deallocate runs. */
     for (runselm = qr_next(purge_runs_sentinel, rd_link),
-        chunkselm = qr_next(purge_chunks_sentinel, cd_link);
+        chunkselm = qr_next(purge_chunks_sentinel, cc_link);
         runselm != purge_runs_sentinel; runselm = runselm_next) {
         runselm_next = qr_next(runselm, rd_link);
         if (runselm == &chunkselm->runs_dirty) {
             extent_node_t *chunkselm_next = qr_next(chunkselm,
-                cd_link);
-            bool dirty = !extent_node_zeroed_get(chunkselm);
+                cc_link);
             void *addr = extent_node_addr_get(chunkselm);
             size_t size = extent_node_size_get(chunkselm);
-            arena_chunk_dirty_remove(chunkselm);
+            bool zeroed = extent_node_zeroed_get(chunkselm);
+            extent_node_dirty_remove(chunkselm);
             arena_node_dalloc(arena, chunkselm);
             chunkselm = chunkselm_next;
-            chunk_unmap(arena, dirty, addr, size);
+            chunk_unmap(arena, addr, size, zeroed);
         } else {
             arena_run_t *run = &runselm->run;
             qr_remove(runselm, rd_link);
@@ -2612,14 +2592,14 @@ arena_new(unsigned ind)
     arena_avail_tree_new(&arena->runs_avail);
     qr_new(&arena->runs_dirty, rd_link);
-    qr_new(&arena->chunks_dirty, cd_link);
+    qr_new(&arena->chunks_cache, cc_link);
     ql_new(&arena->huge);
     if (malloc_mutex_init(&arena->huge_mtx))
         return (NULL);
-    extent_tree_szad_new(&arena->chunks_szad_dirty);
-    extent_tree_ad_new(&arena->chunks_ad_dirty);
+    extent_tree_szad_new(&arena->chunks_szad_cache);
+    extent_tree_ad_new(&arena->chunks_ad_cache);
     extent_tree_szad_new(&arena->chunks_szad_mmap);
     extent_tree_ad_new(&arena->chunks_ad_mmap);
     extent_tree_szad_new(&arena->chunks_szad_dss);
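
The three arena functions patched above form one pipeline: arena_stash_dirty() moves victims onto caller-provided sentinel rings under the arena lock, arena_purge_stashed() drops the lock while issuing the slow purges, and arena_unstash_purged() deallocates whatever was stashed. A skeleton of that locking shape, with invented stand-in functions (stash/purge/unstash) rather than the real signatures, and with the unlock hoisted out of the purge phase for brevity:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static int stashed_pages;

/* Stand-ins for arena_stash_dirty / arena_purge_stashed /
 * arena_unstash_purged; only the lock discipline is modeled. */
static void stash(void)   { stashed_pages = 512; }
static void purge(void)   { printf("purging %d pages\n", stashed_pages); }
static void unstash(void) { stashed_pages = 0; }

static void arena_purge_model(void) {
    pthread_mutex_lock(&arena_lock);
    stash();    /* move dirty runs/chunks onto local sentinel rings */
    pthread_mutex_unlock(&arena_lock);

    purge();    /* slow madvise()-style work runs without the lock */

    pthread_mutex_lock(&arena_lock);
    unstash();  /* deallocate the stashed runs/chunks */
    pthread_mutex_unlock(&arena_lock);
}

int main(void) { arena_purge_model(); return 0; }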

src/chunk.c

@@ -64,7 +64,7 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
 static void *
 chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool dirty, void *new_addr, size_t size,
+    extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
     size_t alignment, bool *zero)
 {
     void *ret;
@@ -100,13 +100,13 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
     /* Remove node from the tree. */
     extent_tree_szad_remove(chunks_szad, node);
     extent_tree_ad_remove(chunks_ad, node);
-    arena_chunk_dirty_maybe_remove(arena, node, dirty);
+    arena_chunk_cache_maybe_remove(arena, node, cache);
     if (leadsize != 0) {
         /* Insert the leading space as a smaller chunk. */
         extent_node_size_set(node, leadsize);
         extent_tree_szad_insert(chunks_szad, node);
         extent_tree_ad_insert(chunks_ad, node);
-        arena_chunk_dirty_maybe_insert(arena, node, dirty);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
         node = NULL;
     }
     if (trailsize != 0) {
@@ -116,7 +116,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
         if (node == NULL) {
             malloc_mutex_unlock(&arena->chunks_mtx);
             chunk_record(arena, chunks_szad, chunks_ad,
-                dirty, ret, size);
+                cache, ret, size, zeroed);
             return (NULL);
         }
     }
@@ -124,7 +124,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
             trailsize, zeroed);
         extent_tree_szad_insert(chunks_szad, node);
         extent_tree_ad_insert(chunks_ad, node);
-        arena_chunk_dirty_maybe_insert(arena, node, dirty);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
         node = NULL;
     }
     malloc_mutex_unlock(&arena->chunks_mtx);
@@ -177,9 +177,9 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
     assert(alignment != 0);
     assert((alignment & chunksize_mask) == 0);
-    /* dirty. */
-    if ((ret = chunk_recycle(arena, &arena->chunks_szad_dirty,
-        &arena->chunks_ad_dirty, true, new_addr, size, alignment, zero)) !=
+    /* cache. */
+    if ((ret = chunk_recycle(arena, &arena->chunks_szad_cache,
+        &arena->chunks_ad_cache, true, new_addr, size, alignment, zero)) !=
         NULL)
         return (ret);
     /* "primary" dss. */
@@ -276,13 +276,14 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 void
 chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size)
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed)
 {
     bool unzeroed;
     extent_node_t *node, *prev;
     extent_node_t key;
-    unzeroed = dirty ? true : pages_purge(chunk, size);
+    assert(!cache || !zeroed);
+    unzeroed = cache || !zeroed;
     JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
     malloc_mutex_lock(&arena->chunks_mtx);
@@ -298,13 +299,13 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
          * remove/insert from/into chunks_szad.
          */
         extent_tree_szad_remove(chunks_szad, node);
-        arena_chunk_dirty_maybe_remove(arena, node, dirty);
+        arena_chunk_cache_maybe_remove(arena, node, cache);
         extent_node_addr_set(node, chunk);
         extent_node_size_set(node, size + extent_node_size_get(node));
         extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
             !unzeroed);
         extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_dirty_maybe_insert(arena, node, dirty);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
     } else {
         /* Coalescing forward failed, so insert a new node. */
         node = arena_node_alloc(arena);
@@ -315,14 +316,14 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
              * pages have already been purged, so that this is only
              * a virtual memory leak.
              */
-            if (dirty)
+            if (cache)
                 pages_purge(chunk, size);
             goto label_return;
         }
         extent_node_init(node, arena, chunk, size, !unzeroed);
         extent_tree_ad_insert(chunks_ad, node);
         extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_dirty_maybe_insert(arena, node, dirty);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
     }
     /* Try to coalesce backward. */
@@ -336,16 +337,16 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
          */
         extent_tree_szad_remove(chunks_szad, prev);
         extent_tree_ad_remove(chunks_ad, prev);
-        arena_chunk_dirty_maybe_remove(arena, prev, dirty);
+        arena_chunk_cache_maybe_remove(arena, prev, cache);
         extent_tree_szad_remove(chunks_szad, node);
-        arena_chunk_dirty_maybe_remove(arena, node, dirty);
+        arena_chunk_cache_maybe_remove(arena, node, cache);
         extent_node_addr_set(node, extent_node_addr_get(prev));
         extent_node_size_set(node, extent_node_size_get(prev) +
             extent_node_size_get(node));
         extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
             extent_node_zeroed_get(node));
         extent_tree_szad_insert(chunks_szad, node);
-        arena_chunk_dirty_maybe_insert(arena, node, dirty);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
         arena_node_dalloc(arena, prev);
     }
@@ -363,8 +364,8 @@ chunk_cache(arena_t *arena, void *chunk, size_t size)
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
-    chunk_record(arena, &arena->chunks_szad_dirty, &arena->chunks_ad_dirty,
-        true, chunk, size);
+    chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
+        true, chunk, size, false);
 }
 /* Default arena chunk deallocation routine in the absence of user override. */
@@ -377,7 +378,7 @@ chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
 }
 void
-chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size)
+chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed)
 {
     assert(chunk != NULL);
@@ -387,10 +388,10 @@ chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size)
     if (have_dss && chunk_in_dss(chunk)) {
         chunk_record(arena, &arena->chunks_szad_dss,
-            &arena->chunks_ad_dss, dirty, chunk, size);
+            &arena->chunks_ad_dss, false, chunk, size, zeroed);
     } else if (chunk_dalloc_mmap(chunk, size)) {
         chunk_record(arena, &arena->chunks_szad_mmap,
-            &arena->chunks_ad_mmap, dirty, chunk, size);
+            &arena->chunks_ad_mmap, false, chunk, size, zeroed);
     }
 }
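
chunk_record()'s new contract is visible in the two call sites above: chunk_cache() passes cache=true with zeroed=false, chunk_unmap() passes cache=false plus the real zeroed state, and coalescing keeps an extent zeroed only if both halves were. A small model of that flag algebra (record_unzeroed is an invented name):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of chunk_record()'s zeroed bookkeeping after this commit. */
static bool record_unzeroed(bool cache, bool zeroed) {
    /* Cached chunks are never advertised as zeroed... */
    assert(!cache || !zeroed);
    /* ...so they are always treated as potentially nonzero; for the
     * mmap/dss trees, trust the zeroed state the caller passed in. */
    return cache || !zeroed;
}

int main(void) {
    /* chunk_cache(): cache=true, zeroed=false -> unzeroed. */
    printf("%d\n", record_unzeroed(true, false));   /* prints 1 */
    /* chunk_unmap() of a purge-zeroed chunk: no re-purge needed. */
    printf("%d\n", record_unzeroed(false, true));   /* prints 0 */
    /* Coalescing two extents stays zeroed only if both were zeroed. */
    bool a = true, b = false;
    printf("coalesced zeroed=%d\n", a && b);        /* prints 0 */
    return 0;
}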

src/chunk_dss.c

@@ -137,7 +137,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
                 chunk_record(arena,
                     &arena->chunks_szad_dss,
                     &arena->chunks_ad_dss, false, cpad,
-                    cpad_size);
+                    cpad_size, false);
             }
             if (*zero) {
                 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(