Fix chunk_unmap() to propagate dirty state.
Fix chunk_unmap() to propagate whether a chunk is dirty, and modify
dirty chunk purging to record this information so it can be passed to
chunk_unmap(). Since the broken version of chunk_unmap() claimed that
all chunks were clean, this resulted in potential memory corruption for
purging implementations that do not zero (e.g. MADV_FREE).
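To make the failure concrete, here is a minimal standalone sketch (the names purge_may_leave_data and recycle_as_zeroed are hypothetical, not jemalloc's API) of why a purge primitive built on MADV_FREE must report that purged pages may still hold data; a recycler that is told dirty=false skips re-zeroing and can hand stale bytes back out:

/*
 * Hypothetical standalone sketch, not jemalloc source.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/* Purge; return true if the pages may still hold their old contents. */
static bool
purge_may_leave_data(void *addr, size_t size)
{
#ifdef MADV_FREE
	/*
	 * MADV_FREE only marks the pages reclaimable; until the kernel
	 * actually takes them back, reads still see the old bytes.
	 */
	madvise(addr, size, MADV_FREE);
	return (true);
#else
	/* Linux's MADV_DONTNEED zero-fills on the next touch. */
	madvise(addr, size, MADV_DONTNEED);
	return (false);
#endif
}

/* A recycler that trusts a bogus dirty=false skips this zeroing. */
static void
recycle_as_zeroed(void *addr, size_t size, bool dirty)
{
	if (dirty)
		memset(addr, 0, size);
}

int
main(void)
{
	size_t size = 4096;
	unsigned char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	bool dirty;

	if (p == MAP_FAILED)
		return (1);
	p[0] = 0xaa;				/* dirty the page */
	dirty = purge_may_leave_data(p, size);
	recycle_as_zeroed(p, size, dirty);	/* broken code passed false */
	return (p[0] != 0);			/* nonzero: stale data leaked */
}

With the fix, purging records !unzeroed in the extent node's zeroed flag, and chunk_unmap() forwards the derived dirty flag to chunk_record() instead of hard-coding false, as the diff below shows.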
This regression was introduced by ee41ad409a (Integrate whole chunks
into unused dirty page purging machinery.).
parent 47701b22ee
commit 339c2b23b2
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -47,7 +47,7 @@ void	*chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
 void	chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
 bool	chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void	chunk_unmap(arena_t *arena, void *chunk, size_t size);
+void	chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
 void	chunk_postfork_parent(void);
--- a/src/arena.c
+++ b/src/arena.c
@@ -1035,6 +1035,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 			chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
 			    arena->ind);
 			assert(chunk == addr);
+			assert(zero == zeroed);
 			/*
 			 * Create a temporary node to link into the ring of
 			 * stashed allocations.
@@ -1075,7 +1076,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 
 			/* Temporarily allocate the free dirty run. */
 			arena_run_split_large(arena, run, run_size, false);
-			/* Append to purge_runs for later processing. */
+			/* Stash. */
 			if (false)
 				qr_new(runselm, rd_link); /* Redundant. */
 			else {
@@ -1114,9 +1115,12 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
 
 		if (runselm == &chunkselm->runs_dirty) {
 			size_t size = extent_node_size_get(chunkselm);
+			bool unzeroed;
 
-			pages_purge(extent_node_addr_get(chunkselm), size);
 			npages = size >> LG_PAGE;
+			unzeroed = pages_purge(extent_node_addr_get(chunkselm),
+			    size);
+			extent_node_zeroed_set(chunkselm, !unzeroed);
 			chunkselm = qr_next(chunkselm, cd_link);
 		} else {
 			arena_chunk_t *chunk;
@@ -1180,11 +1184,13 @@ arena_unstash_purged(arena_t *arena,
 		if (runselm == &chunkselm->runs_dirty) {
 			extent_node_t *chunkselm_next = qr_next(chunkselm,
 			    cd_link);
+			bool dirty = !extent_node_zeroed_get(chunkselm);
+			void *addr = extent_node_addr_get(chunkselm);
+			size_t size = extent_node_size_get(chunkselm);
 			arena_chunk_dirty_remove(chunkselm);
-			chunk_unmap(arena, extent_node_addr_get(chunkselm),
-			    extent_node_size_get(chunkselm));
 			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
+			chunk_unmap(arena, dirty, addr, size);
 		} else {
 			arena_run_t *run = &runselm->run;
 			qr_remove(runselm, rd_link);
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -377,7 +377,7 @@ chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
 }
 
 void
-chunk_unmap(arena_t *arena, void *chunk, size_t size)
+chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size)
 {
 
 	assert(chunk != NULL);
@@ -387,10 +387,10 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size)
 
 	if (have_dss && chunk_in_dss(chunk)) {
 		chunk_record(arena, &arena->chunks_szad_dss,
-		    &arena->chunks_ad_dss, false, chunk, size);
+		    &arena->chunks_ad_dss, dirty, chunk, size);
 	} else if (chunk_dalloc_mmap(chunk, size)) {
 		chunk_record(arena, &arena->chunks_szad_mmap,
-		    &arena->chunks_ad_mmap, false, chunk, size);
+		    &arena->chunks_ad_mmap, dirty, chunk, size);
 	}
 }
 