Fix chunk_unmap() to propagate dirty state.

Fix chunk_unmap() to propagate whether a chunk is dirty, and modify
dirty chunk purging to record this information so it can be passed to
chunk_unmap().  Since the broken version of chunk_unmap() claimed that
all chunks were clean, this resulted in potential memory corruption for
purging implementations that do not zero (e.g. MADV_FREE).
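
For context on why a non-zeroing purge is dangerous: purge primitives differ
in whether the kernel guarantees zero-filled pages afterward. A minimal
sketch of that distinction, assuming a POSIX-like platform (purge_pages() is
an illustrative name, not jemalloc's pages_purge()):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Purge the pages backing [addr, addr+size).  Return true ("unzeroed") if
 * the contents may still be nonzero afterward; the caller must then treat
 * the range as dirty rather than clean.
 */
static bool
purge_pages(void *addr, size_t size)
{

#ifdef MADV_FREE
    /*
     * MADV_FREE discards lazily: until the kernel actually reclaims a
     * page, reads still return its old contents.
     */
    madvise(addr, size, MADV_FREE);
    return (true);
#else
    /*
     * MADV_DONTNEED on an anonymous private mapping guarantees that
     * subsequent reads see zero-filled pages.
     */
    madvise(addr, size, MADV_DONTNEED);
    return (false);
#endif
}

If a caller discards this result and records the chunk as clean, a later
allocation that requests zeroed memory will skip its memset() and receive
stale bytes, which is exactly the corruption described above.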

This regression was introduced by ee41ad409a (Integrate whole chunks into
unused dirty page purging machinery.).
Author: Jason Evans
Date:   2015-02-17 22:25:56 -08:00
Commit: 339c2b23b2
Parent: 47701b22ee

3 changed files with 14 additions and 8 deletions

include/jemalloc/internal/chunk.h

@@ -47,7 +47,7 @@ void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
 void chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
 bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void chunk_unmap(arena_t *arena, void *chunk, size_t size);
+void chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size);
 bool chunk_boot(void);
 void chunk_prefork(void);
 void chunk_postfork_parent(void);

src/arena.c

@@ -1035,6 +1035,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
             chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
                 arena->ind);
             assert(chunk == addr);
+            assert(zero == zeroed);
             /*
              * Create a temporary node to link into the ring of
              * stashed allocations.
@@ -1075,7 +1076,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
             /* Temporarily allocate the free dirty run. */
             arena_run_split_large(arena, run, run_size, false);
-            /* Append to purge_runs for later processing. */
+            /* Stash. */
             if (false)
                 qr_new(runselm, rd_link); /* Redundant. */
             else {
@@ -1114,9 +1115,12 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
         if (runselm == &chunkselm->runs_dirty) {
             size_t size = extent_node_size_get(chunkselm);
+            bool unzeroed;

-            pages_purge(extent_node_addr_get(chunkselm), size);
             npages = size >> LG_PAGE;
+            unzeroed = pages_purge(extent_node_addr_get(chunkselm),
+                size);
+            extent_node_zeroed_set(chunkselm, !unzeroed);
             chunkselm = qr_next(chunkselm, cd_link);
         } else {
             arena_chunk_t *chunk;
@@ -1180,11 +1184,13 @@ arena_unstash_purged(arena_t *arena,
         if (runselm == &chunkselm->runs_dirty) {
             extent_node_t *chunkselm_next = qr_next(chunkselm,
                 cd_link);
+            bool dirty = !extent_node_zeroed_get(chunkselm);
+            void *addr = extent_node_addr_get(chunkselm);
+            size_t size = extent_node_size_get(chunkselm);
             arena_chunk_dirty_remove(chunkselm);
-            chunk_unmap(arena, extent_node_addr_get(chunkselm),
-                extent_node_size_get(chunkselm));
             arena_node_dalloc(arena, chunkselm);
             chunkselm = chunkselm_next;
+            chunk_unmap(arena, dirty, addr, size);
         } else {
             arena_run_t *run = &runselm->run;
             qr_remove(runselm, rd_link);
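
Taken together, the three arena.c hunks thread the purge result through
purging's three phases: arena_stash_dirty() stashes dirty runs and chunks,
arena_purge_stashed() folds the pages_purge() result into each chunk's
extent node, and arena_unstash_purged() converts the node's zeroed flag
into chunk_unmap()'s new dirty argument. A self-contained toy of that
pipeline (node_t, purge(), and unmap() are invented stand-ins, not
jemalloc's API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    void   *addr;
    size_t  size;
    bool    zeroed;    /* contents known to be all-zero */
} node_t;

/* Stand-in for an MADV_FREE-style purge: contents may survive. */
static bool
purge(void *addr, size_t size)
{

    (void)addr;
    (void)size;
    return (true);    /* "unzeroed" */
}

/* Stand-in for chunk_unmap(): dirty must reach the recorded extent. */
static void
unmap(bool dirty, void *addr, size_t size)
{

    printf("record %zu bytes at %p as %s\n", size, addr,
        dirty ? "dirty" : "clean");
}

int
main(void)
{
    static char chunk[4096];
    node_t n = {chunk, sizeof(chunk), true};

    /* Purge phase: fold the purge result into the node. */
    n.zeroed = !purge(n.addr, n.size);

    /* Unstash phase: copy fields out, then record the chunk. */
    bool dirty = !n.zeroed;
    unmap(dirty, n.addr, n.size);    /* the pre-fix code passed false */
    return (0);
}

Note also why the fix hoists dirty, addr, and size into locals:
chunk_unmap() is now called after arena_node_dalloc() has freed chunkselm,
so the values must be copied out of the node first.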

src/chunk.c

@@ -377,7 +377,7 @@ chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
 }

 void
-chunk_unmap(arena_t *arena, void *chunk, size_t size)
+chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size)
 {

     assert(chunk != NULL);
@@ -387,10 +387,10 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size)

     if (have_dss && chunk_in_dss(chunk)) {
         chunk_record(arena, &arena->chunks_szad_dss,
-            &arena->chunks_ad_dss, false, chunk, size);
+            &arena->chunks_ad_dss, dirty, chunk, size);
     } else if (chunk_dalloc_mmap(chunk, size)) {
         chunk_record(arena, &arena->chunks_szad_mmap,
-            &arena->chunks_ad_mmap, false, chunk, size);
+            &arena->chunks_ad_mmap, dirty, chunk, size);
     }
 }
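
Downstream of this diff, chunk_record() inserts the chunk into the arena's
recycling trees; the dirty flag presumably ends up as the complement of the
extent node's zeroed flag, which is what a recycling allocation consults
when the caller asks for zeroed memory. A hedged sketch of that consumer
logic, with invented names (recycle(), node_zeroed, want_zero):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical recycler fragment: zero only when needed and requested. */
static void *
recycle(void *chunk, size_t size, bool node_zeroed, bool want_zero)
{

    if (want_zero && !node_zeroed) {
        /*
         * The chunk was recorded dirty, so its contents are
         * unspecified; zero before handing it out.  With the
         * pre-fix chunk_unmap(), node_zeroed was always true, the
         * memset() was skipped, and stale data leaked to callers.
         */
        memset(chunk, 0, size);
    }
    return (chunk);
}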