From 397e5111b5efd49f61f73c1bad0375c7885a6128 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Sat, 16 Oct 2010 16:10:40 -0700
Subject: [PATCH] Preserve CHUNK_MAP_UNZEROED for small runs.

Preserve CHUNK_MAP_UNZEROED when allocating small runs, because it is
possible that untouched pages will be returned to the tree of clean runs,
where the CHUNK_MAP_UNZEROED flag matters.  Prior to the conversion from
CHUNK_MAP_ZEROED, this was already a bug, but in the worst case extra
zeroing occurred.  After the conversion, this bug made it possible to
incorrectly treat pages as pre-zeroed.
---
 jemalloc/src/arena.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 7c99d436..8cc35f5f 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -389,14 +389,18 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		 * arena_dalloc_bin_run() has the ability to conditionally trim
 		 * clean pages.
 		 */
-		chunk->map[run_ind-map_bias].bits = CHUNK_MAP_ALLOCATED |
-		    flag_dirty;
+		chunk->map[run_ind-map_bias].bits =
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
+		    CHUNK_MAP_ALLOCATED | flag_dirty;
 		for (i = 1; i < need_pages - 1; i++) {
 			chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
-			    | CHUNK_MAP_ALLOCATED;
+			    | (chunk->map[run_ind+i-map_bias].bits &
+			    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
 		}
 		chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
-		    - 1) << PAGE_SHIFT) | CHUNK_MAP_ALLOCATED | flag_dirty;
+		    - 1) << PAGE_SHIFT) |
+		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
+		    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
 	}
 }
 
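
Note: below is a minimal standalone C sketch (not part of the patch)
illustrating the read-modify-write pattern the fix applies.  The buggy
update blindly overwrites the map word and silently drops
CHUNK_MAP_UNZEROED; the fixed update masks the flag out of the old value
and ORs it back in.  The map_elm_t type and the flag values here are
illustrative placeholders, not jemalloc's actual definitions.

/*
 * Standalone sketch of the flag-preserving update pattern from the
 * patch above.  Flag values and map_elm_t are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define CHUNK_MAP_UNZEROED	((size_t)0x4)
#define CHUNK_MAP_DIRTY		((size_t)0x2)
#define CHUNK_MAP_ALLOCATED	((size_t)0x1)

typedef struct { size_t bits; } map_elm_t;

/* Buggy version: overwrites all bits, losing CHUNK_MAP_UNZEROED. */
static void
map_set_clobber(map_elm_t *elm, size_t flag_dirty)
{
	elm->bits = CHUNK_MAP_ALLOCATED | flag_dirty;
}

/* Fixed version: carry CHUNK_MAP_UNZEROED over from the old value. */
static void
map_set_preserve(map_elm_t *elm, size_t flag_dirty)
{
	elm->bits = (elm->bits & CHUNK_MAP_UNZEROED) |
	    CHUNK_MAP_ALLOCATED | flag_dirty;
}

int
main(void)
{
	/* Both pages start out flagged as never having been zeroed. */
	map_elm_t a = {CHUNK_MAP_UNZEROED}, b = {CHUNK_MAP_UNZEROED};

	map_set_clobber(&a, 0);
	map_set_preserve(&b, 0);

	/* Buggy update drops the flag: the page now looks pre-zeroed. */
	printf("clobber:  unzeroed bit = %zu\n", a.bits & CHUNK_MAP_UNZEROED);
	/* Fixed update keeps it. */
	printf("preserve: unzeroed bit = %zu\n", b.bits & CHUNK_MAP_UNZEROED);
	assert((b.bits & CHUNK_MAP_UNZEROED) != 0);
	return (0);
}

As in the patch, the key point is that the map update is a read-modify-
write rather than a plain store, so state that outlives the allocation
(here, whether the page still needs zeroing) survives the transition.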