From 65db63cf3f0c5dd5126a1b3786756486eaf931ba Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Wed, 25 Mar 2015 18:56:55 -0700
Subject: [PATCH] Fix in-place shrinking huge reallocation purging bugs.

Fix the shrinking case of huge_ralloc_no_move_similar() to purge the
correct number of pages, at the correct offset.  This regression was
introduced by 8d6a3e8321a7767cb2ca0930b85d5d488a8cc659 (Implement dynamic
per arena control over dirty page purging.).

Fix huge_ralloc_no_move_shrink() to purge the correct number of pages.
This bug was introduced by 9673983443a0782d975fbcb5d8457cfd411b8b56
(Purge/zero sub-chunk huge allocations as necessary.).
---
 src/arena.c |  7 +------
 src/huge.c  | 31 ++++++++++++++++---------------
 2 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/src/arena.c b/src/arena.c
index bc13d209..30410683 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1245,16 +1245,11 @@ arena_purge_stashed(arena_t *arena,
 
 		if (rdelm == &chunkselm->rd) {
 			size_t size = extent_node_size_get(chunkselm);
-			void *addr, *chunk;
-			size_t offset;
 			bool unzeroed;
 
 			npages = size >> LG_PAGE;
-			addr = extent_node_addr_get(chunkselm);
-			chunk = CHUNK_ADDR2BASE(addr);
-			offset = CHUNK_ADDR2OFFSET(addr);
 			unzeroed = chunk_purge_wrapper(arena, chunk_purge,
-			    chunk, offset, size);
+			    extent_node_addr_get(chunkselm), 0, size);
 			extent_node_zeroed_set(chunkselm, !unzeroed);
 			chunkselm = qr_next(chunkselm, cc_link);
 		} else {
diff --git a/src/huge.c b/src/huge.c
index aa26f5df..32af2058 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -145,12 +145,11 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
 
 	/* Fill if necessary (shrinking). */
 	if (oldsize > usize) {
-		size_t sdiff = CHUNK_CEILING(usize) - usize;
-		zeroed = (sdiff != 0) ? !chunk_purge_wrapper(arena, chunk_purge,
-		    CHUNK_ADDR2BASE(ptr), CHUNK_ADDR2OFFSET(ptr), usize) : true;
+		size_t sdiff = oldsize - usize;
+		zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
+		    sdiff);
 		if (config_fill && unlikely(opt_junk_free)) {
-			memset((void *)((uintptr_t)ptr + usize), 0x5a, oldsize -
-			    usize);
+			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
 			zeroed = false;
 		}
 	} else
@@ -186,7 +185,6 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 	extent_node_t *node;
 	arena_t *arena;
 	chunk_purge_t *chunk_purge;
-	size_t sdiff;
 	bool zeroed;
 
 	node = huge_node_get(ptr);
@@ -196,15 +194,18 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 	chunk_purge = arena->chunk_purge;
 	malloc_mutex_unlock(&arena->lock);
 
-	sdiff = CHUNK_CEILING(usize) - usize;
-	zeroed = (sdiff != 0) ? !chunk_purge_wrapper(arena, chunk_purge,
-	    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
-	    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff) : true;
-	if (config_fill && unlikely(opt_junk_free)) {
-		huge_dalloc_junk((void *)((uintptr_t)ptr + usize), oldsize -
-		    usize);
-		zeroed = false;
-	}
+	if (oldsize > usize) {
+		size_t sdiff = oldsize - usize;
+		zeroed = !chunk_purge_wrapper(arena, chunk_purge,
+		    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+		    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
+		if (config_fill && unlikely(opt_junk_free)) {
+			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+			    sdiff);
+			zeroed = false;
+		}
+	} else
+		zeroed = true;
 
 	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
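
Editor's note (not part of the patch): the standalone C sketch below illustrates
the arithmetic the shrink-case fix corrects.  The chunk size and allocation
sizes are hypothetical, and CHUNK_CEILING is re-derived locally for the
example; only the offset/length semantics of the purge call are taken from
the patch above.

/*
 * Standalone sketch (hypothetical sizes): compares the purge length the
 * buggy huge_ralloc_no_move_shrink() computed against the fixed one.
 */
#include <stdio.h>
#include <stddef.h>

#define LG_CHUNK	21	/* assume 2 MiB chunks for illustration */
#define CHUNKSIZE	((size_t)1 << LG_CHUNK)
#define CHUNK_CEILING(s)	(((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

int
main(void)
{
	size_t oldsize = 3 * CHUNKSIZE;		/* shrink from 6 MiB... */
	size_t usize = CHUNKSIZE + 4096;	/* ...to 2 MiB + one page */

	/*
	 * Buggy: only the pad between the new usable size and the next
	 * chunk boundary was purged, leaving the rest of the freed range
	 * (everything up to oldsize) dirty.
	 */
	size_t bad_len = CHUNK_CEILING(usize) - usize;

	/*
	 * Fixed: purge the entire freed range, i.e. oldsize - usize bytes
	 * starting at offset usize within the allocation.
	 */
	size_t good_len = oldsize - usize;

	printf("buggy purge length: %zu bytes\n", bad_len);	/* 2093056 */
	printf("fixed purge length: %zu bytes\n", good_len);	/* 4190208 */
	return (0);
}

The huge_ralloc_no_move_similar() case fixed in the first huge.c hunk was
worse still: because huge allocations are chunk-aligned, the old call passed
offset CHUNK_ADDR2OFFSET(ptr) == 0 and length usize, purging the retained
prefix of the allocation rather than the freed tail at offset usize.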