Fix large calloc() zeroing bugs.
Refactor code such that arena_mapbits_{large,small}_set() always preserve the unzeroed flag, and manually manipulate the unzeroed flag in the one case where it actually gets set (in arena_chunk_purge()). This fixes unzeroed preservation bugs in arena_run_split() and arena_ralloc_large_grow(). These bugs caused large calloc() to return non-zeroed memory under some circumstances.
parent 30fe12b866
commit d8ceef6c55
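The failure mode is easy to model in isolation. Below is a minimal sketch using hypothetical names and a collapsed flag layout (mapbits_large_set_*, MAP_*), not jemalloc's real chunk map: a setter that rebuilds the map word from its arguments alone silently drops the unzeroed bit unless every caller remembers to OR it back in, which is the mistake arena_run_split() and arena_ralloc_large_grow() made.

#include <assert.h>
#include <stddef.h>

#define	MAP_ALLOCATED	((size_t)0x1)
#define	MAP_LARGE	((size_t)0x2)
#define	MAP_DIRTY	((size_t)0x4)
#define	MAP_UNZEROED	((size_t)0x8)

/* Buggy shape: rebuilds the word from arguments, losing MAP_UNZEROED. */
static void
mapbits_large_set_buggy(size_t *mapbitsp, size_t size, size_t flags)
{

	*mapbitsp = size | flags | MAP_LARGE | MAP_ALLOCATED;
}

/* Fixed shape: reads the old word and carries MAP_UNZEROED forward. */
static void
mapbits_large_set_fixed(size_t *mapbitsp, size_t size, size_t flags)
{
	size_t unzeroed = *mapbitsp & MAP_UNZEROED; /* Preserve unzeroed. */

	*mapbitsp = size | flags | unzeroed | MAP_LARGE | MAP_ALLOCATED;
}

int
main(void)
{
	size_t mapbits = MAP_UNZEROED;	/* Page known to contain junk. */

	mapbits_large_set_fixed(&mapbits, 0x1000, MAP_DIRTY);
	assert(mapbits & MAP_UNZEROED);	/* Flag survives the update. */

	mapbits = MAP_UNZEROED;
	mapbits_large_set_buggy(&mapbits, 0x1000, MAP_DIRTY);
	/* Flag lost: calloc() would wrongly skip zeroing this run. */
	assert((mapbits & MAP_UNZEROED) == 0);
	return (0);
}

With the flag preserved inside the setter, callers no longer need to thread arena_mapbits_unzeroed_get() through every call site, which is exactly what the arena.c hunks below delete.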
ChangeLog
@@ -71,6 +71,7 @@ found in the git revision history:
     write-after-free memory corruption.
   - Fix a potential deadlock that could occur during interval- and
     growth-triggered heap profile dumps.
+  - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
   - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
     cause memory corruption and crashes with --enable-dss specified.
   - Fix fork-related bugs that could cause deadlock in children between fork
include/jemalloc/internal/arena.h
@@ -591,6 +591,7 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert((size & PAGE_MASK) == 0);
 	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
 	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
 }
 
@@ -611,12 +612,14 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
 	size_t *mapbitsp;
+	size_t unzeroed;
 
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert((size & PAGE_MASK) == 0);
-	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
-	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
-	    CHUNK_MAP_ALLOCATED;
+	assert((flags & CHUNK_MAP_DIRTY) == flags);
+	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
+	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
 }
 
 JEMALLOC_INLINE void
@@ -637,13 +640,15 @@ arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
     size_t binind, size_t flags)
 {
 	size_t *mapbitsp;
+	size_t unzeroed;
 
 	assert(binind < BININD_INVALID);
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert(pageind - runind >= map_bias);
-	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+	assert((flags & CHUNK_MAP_DIRTY) == flags);
+	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
 	*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
-	    flags | CHUNK_MAP_ALLOCATED;
+	    flags | unzeroed | CHUNK_MAP_ALLOCATED;
 }
 
 JEMALLOC_INLINE void
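Read together, the three header hunks tighten the setters' contract rather than just plugging one leak of state. A compilable restatement of that contract, using placeholder bit values instead of the real CHUNK_MAP_* definitions:

#include <assert.h>
#include <stddef.h>

#define	MAP_DIRTY	((size_t)0x4)	/* Placeholder bit values; the     */
#define	MAP_UNZEROED	((size_t)0x8)	/* real CHUNK_MAP_* layout differs. */

/* arena_mapbits_unallocated_set() may still be handed dirty and/or
 * unzeroed bits by its callers... */
static void
unallocated_flags_ok(size_t flags)
{

	assert((flags & (MAP_DIRTY | MAP_UNZEROED)) == flags);
}

/* ...while the large/small setters now accept the dirty bit only;
 * unzeroed is sourced from the existing map word instead. */
static void
allocated_flags_ok(size_t flags)
{

	assert((flags & MAP_DIRTY) == flags);
}

int
main(void)
{

	unallocated_flags_ok(MAP_DIRTY | MAP_UNZEROED);	/* Allowed. */
	allocated_flags_ok(MAP_DIRTY);			/* Allowed. */
	/* allocated_flags_ok(MAP_UNZEROED); would now abort. */
	return (0);
}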
src/arena.c
@@ -311,8 +311,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		 * small run, so that arena_dalloc_bin_run() has the ability to
 		 * conditionally trim clean pages.
 		 */
-		arena_mapbits_small_set(chunk, run_ind, 0, binind,
-		    arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
+		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
 		/*
 		 * The first page will always be dirtied during small run
 		 * initialization, so a validation failure here would not
@@ -322,16 +321,13 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
 			arena_chunk_validate_zeroed(chunk, run_ind);
 		for (i = 1; i < need_pages - 1; i++) {
-			arena_mapbits_small_set(chunk, run_ind+i, i,
-			    binind, arena_mapbits_unzeroed_get(chunk,
-			    run_ind+i));
+			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
 			if (config_debug && flag_dirty == 0 &&
 			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
 				arena_chunk_validate_zeroed(chunk, run_ind+i);
 		}
-		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
-		    need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
-		    run_ind+need_pages-1) | flag_dirty);
+		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
+		    need_pages-1, binind, flag_dirty);
 		if (config_debug && flag_dirty == 0 &&
 		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
 		    0) {
@@ -612,8 +608,10 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 				arena_avail_tree_remove(
 				    &arena->runs_avail_dirty, mapelm);
 
+				arena_mapbits_unzeroed_set(chunk, pageind,
+				    flag_unzeroed);
 				arena_mapbits_large_set(chunk, pageind,
-				    (npages << LG_PAGE), flag_unzeroed);
+				    (npages << LG_PAGE), 0);
 				/*
 				 * Update internal elements in the page map, so
 				 * that CHUNK_MAP_UNZEROED is properly set.
@@ -623,8 +621,10 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 					    pageind+i, flag_unzeroed);
 				}
 				if (npages > 1) {
+					arena_mapbits_unzeroed_set(chunk,
+					    pageind+npages-1, flag_unzeroed);
 					arena_mapbits_large_set(chunk,
-					    pageind+npages-1, 0, flag_unzeroed);
+					    pageind+npages-1, 0, 0);
 				}
 
 				if (config_stats) {
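arena_chunk_purge() remains the one place that legitimately rewrites the unzeroed state: after purging, whether the OS hands back zeroed pages is platform-dependent, so the flag must be recomputed rather than preserved. Hence the new explicit arena_mapbits_unzeroed_set() calls ahead of the now-preserving large setter. A self-contained sketch of that ordering, again with hypothetical names and a collapsed flag layout:

#include <assert.h>
#include <stddef.h>

#define	MAP_ALLOCATED	((size_t)0x1)
#define	MAP_LARGE	((size_t)0x2)
#define	MAP_UNZEROED	((size_t)0x8)

/* Stand-in for arena_mapbits_unzeroed_set(): overwrite only that bit. */
static void
mapbits_unzeroed_set(size_t *mapbitsp, size_t unzeroed)
{

	*mapbitsp = (*mapbitsp & ~MAP_UNZEROED) | unzeroed;
}

/* Stand-in for the preserving arena_mapbits_large_set(). */
static void
mapbits_large_set(size_t *mapbitsp, size_t size, size_t flags)
{
	size_t unzeroed = *mapbitsp & MAP_UNZEROED;

	*mapbitsp = size | flags | unzeroed | MAP_LARGE | MAP_ALLOCATED;
}

int
main(void)
{
	size_t mapbits = 0;			/* Page believed zeroed. */
	size_t flag_unzeroed = MAP_UNZEROED;	/* Purge made it unknown. */

	/* Order matters: record the new unzeroed state first, then let
	 * the preserving setter carry it into the rebuilt map word. */
	mapbits_unzeroed_set(&mapbits, flag_unzeroed);
	mapbits_large_set(&mapbits, 0x1000, 0);
	assert(mapbits & MAP_UNZEROED);
	return (0);
}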
@@ -979,10 +979,8 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	 * run first, in case of single-page runs.
 	 */
 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
-	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind));
+	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
 
 	if (config_debug) {
 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -991,8 +989,8 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		assert(arena_mapbits_dirty_get(chunk,
 		    pageind+head_npages+tail_npages-1) == flag_dirty);
 	}
-	arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
-	    | arena_mapbits_unzeroed_get(chunk, pageind+head_npages));
+	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
+	    flag_dirty);
 
 	arena_run_dalloc(arena, run, false);
 }
@@ -1013,10 +1011,8 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	 * run first, in case of single-page runs.
 	 */
 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
-	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind));
+	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
 
 	if (config_debug) {
 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -1026,8 +1022,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		    pageind+head_npages+tail_npages-1) == flag_dirty);
 	}
-	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
-	    flag_dirty | arena_mapbits_unzeroed_get(chunk,
-	    pageind+head_npages));
+	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
+	    flag_dirty);
 
 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
 	    dirty);
@@ -1535,10 +1530,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	    npages) {
 		/* Trim clean pages. Convert to large run beforehand. */
 		assert(npages > 0);
-		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
-		    arena_mapbits_unzeroed_get(chunk, run_ind));
-		arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
-		    arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
+		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
+		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
 		    ((past - run_ind) << LG_PAGE), false);
 		/* npages = past - run_ind; */
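For completeness, the user-visible symptom can be probed from standard C with no jemalloc internals. Note this is a best-effort check rather than a deterministic regression test, since whether calloc() actually reuses the dirtied run depends on allocator state:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	size_t size = 4 * 1024 * 1024;	/* Comfortably a "large" allocation. */
	char *p, *q;
	size_t i;

	p = malloc(size);
	if (p == NULL)
		return (1);
	memset(p, 0xa5, size);		/* Dirty the pages. */
	free(p);

	q = calloc(1, size);		/* May reuse the dirtied run. */
	if (q == NULL)
		return (1);
	for (i = 0; i < size; i++) {
		if (q[i] != 0) {
			printf("non-zero byte at offset %zu\n", i);
			free(q);
			return (1);
		}
	}
	free(q);
	return (0);
}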