Refactor arena_mapbits unzeroed flag management.

Only set the unzeroed flag when initializing the entire mapbits entry,
rather than mutating just the unzeroed bit.  This simplifies the
possible mapbits state transitions.
Jason Evans 2015-08-10 23:03:34 -07:00
parent de249c8679
commit 45186f0c07
4 changed files with 35 additions and 37 deletions
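Editorial note: to make the refactoring concrete, the old helper rewrote a single bit in place, so a map entry's final state depended on whatever flags it already held, while the new helper overwrites the entire entry. Below is a minimal standalone C sketch of the difference; the bit values and the raw size_t entry are illustrative assumptions, not jemalloc's actual map layout.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK_MAP_DECOMMITTED	((size_t)0x04)	/* assumed bit positions */
#define CHUNK_MAP_UNZEROED	((size_t)0x08)

/* Before: read-modify-write of one bit; any other flags already stored in
 * the entry survive, multiplying the reachable states. */
static void
mapbits_unzeroed_set(size_t *mapbitsp, size_t unzeroed)
{
	*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}

/* After: the entry is written in full, so its state is exactly the
 * caller-supplied flags and nothing stale can leak through. */
static void
mapbits_internal_set(size_t *mapbitsp, size_t flags)
{
	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	*mapbitsp = flags;
}

int
main(void)
{
	size_t mapbits = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_UNZEROED;

	mapbits_unzeroed_set(&mapbits, 0);
	printf("read-modify-write keeps stale bits: %#zx\n", mapbits);	  /* 0x4 */

	mapbits_internal_set(&mapbits, CHUNK_MAP_UNZEROED);
	printf("whole-entry write is fully determined: %#zx\n", mapbits); /* 0x8 */
	return (0);
}

With the whole-entry write, an internal page's map entry is a pure function of the caller's flags, which is what shrinks the set of reachable mapbits states.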

include/jemalloc/internal/arena.h

@@ -534,14 +534,14 @@ void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t size);
+void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
+    size_t flags);
 void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
     index_t binind);
 void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
     size_t runind, index_t binind, size_t flags);
-void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
-    size_t unzeroed);
 void arena_metadata_allocated_add(arena_t *arena, size_t size);
 void arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t arena_metadata_allocated_get(arena_t *arena);
@@ -783,6 +783,15 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
	    & ~CHUNK_MAP_SIZE_MASK));
 }
 
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
+{
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+
+	assert((flags & CHUNK_MAP_UNZEROED) == flags);
+	arena_mapbitsp_write(mapbitsp, flags);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
@@ -831,18 +840,6 @@ arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
	    CHUNK_MAP_ALLOCATED);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
-    size_t unzeroed)
-{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
-	size_t mapbits = arena_mapbitsp_read(mapbitsp);
-
-	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || !unzeroed);
-	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
-	    unzeroed);
-}
-
 JEMALLOC_INLINE void
 arena_metadata_allocated_add(arena_t *arena, size_t size)
 {
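Editorial note: the assert in the new arena_mapbits_internal_set() above, (flags & CHUNK_MAP_UNZEROED) == flags, admits only 0 or CHUNK_MAP_UNZEROED, so an internal page's map entry can only ever be "zeroed" or "unzeroed". That two-state invariant is the simplification the commit message refers to; the removed arena_mapbits_unzeroed_set() instead preserved whatever other flags the entry already held.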

include/jemalloc/internal/private_symbols.txt

@@ -42,6 +42,7 @@ arena_mapbits_binind_get
 arena_mapbits_decommitted_get
 arena_mapbits_dirty_get
 arena_mapbits_get
+arena_mapbits_internal_set
 arena_mapbits_large_binind_set
 arena_mapbits_large_get
 arena_mapbits_large_set
@@ -52,7 +53,6 @@ arena_mapbits_unallocated_set
 arena_mapbits_unallocated_size_get
 arena_mapbits_unallocated_size_set
 arena_mapbits_unzeroed_get
-arena_mapbits_unzeroed_set
 arena_mapbitsp_get
 arena_mapbitsp_read
 arena_mapbitsp_write

src/arena.c

@@ -642,7 +642,7 @@ arena_chunk_init_hard(arena_t *arena)
 {
	arena_chunk_t *chunk;
	bool zero, commit;
-	size_t unzeroed, decommitted, i;
+	size_t flag_unzeroed, flag_decommitted, i;
 
	assert(arena->spare == NULL);
@@ -657,10 +657,10 @@ arena_chunk_init_hard(arena_t *arena)
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
-	unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
-	decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
-	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed |
-	    decommitted);
+	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
@@ -672,7 +672,7 @@ arena_chunk_init_hard(arena_t *arena)
		    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
-			arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
@@ -681,12 +681,12 @@ arena_chunk_init_hard(arena_t *arena)
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
-				    unzeroed);
+				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
-	    unzeroed);
+	    flag_unzeroed);
 
	return (chunk);
 }
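Editorial note: the two ternaries above expand to the following initial-flag table. A decommitted chunk's pages read back zeroed once recommitted, which is why !commit forces flag_unzeroed to 0:

  zero    commit   flag_unzeroed         flag_decommitted
  true    true     0                     0
  false   true     CHUNK_MAP_UNZEROED    0
  any     false    0                     CHUNK_MAP_DECOMMITTED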
@@ -1391,8 +1391,8 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
-			size_t pageind, run_size, flag_unzeroed, i;
-			bool unzeroed, decommitted;
+			size_t pageind, run_size, flag_unzeroed, flags, i;
+			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
@@ -1408,20 +1408,21 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
-				arena_mapbits_large_set(chunk, pageind+npages-1,
-				    0, CHUNK_MAP_DECOMMITTED);
-				arena_mapbits_large_set(chunk, pageind,
-				    run_size, CHUNK_MAP_DECOMMITTED);
-				unzeroed = false;
+				flag_unzeroed = 0;
+				flags = CHUNK_MAP_DECOMMITTED;
			} else {
-				unzeroed = chunk_purge_wrapper(arena,
+				flag_unzeroed = chunk_purge_wrapper(arena,
				    chunk_hooks, chunk, chunksize, pageind <<
-				    LG_PAGE, run_size);
+				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+				flags = flag_unzeroed;
			}
-			flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+			    flags);
+			arena_mapbits_large_set(chunk, pageind, run_size,
+			    flags);
 
			/*
-			 * Set the unzeroed flag for all pages, now that
+			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
@@ -1431,8 +1432,8 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
-			for (i = 0; i < npages; i++) {
-				arena_mapbits_unzeroed_set(chunk, pageind+i,
+			for (i = 1; i < npages-1; i++) {
+				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}
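Editorial note: the loop bounds change from [0, npages) to [1, npages-1) because the run's first and last map entries are now written in full by the two arena_mapbits_large_set() calls above, leaving only the interior entries to arena_mapbits_internal_set(). A standalone sketch of the resulting update pattern follows (toy map array and flag values assumed for illustration; not jemalloc source):

#include <stddef.h>

#define CHUNK_MAP_UNZEROED	((size_t)0x08)	/* assumed values, as above */
#define CHUNK_MAP_LARGE		((size_t)0x02)
#define CHUNK_MAP_ALLOCATED	((size_t)0x01)

static size_t map[64];	/* toy stand-in for the per-chunk mapbits array */

/* Boundary entries carry the run size plus large/allocated bits. */
static void
large_set(size_t pageind, size_t size, size_t flags)
{
	map[pageind] = size | flags | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
}

/* Interior entries are overwritten with nothing but the unzeroed bit. */
static void
internal_set(size_t pageind, size_t flags)
{
	map[pageind] = flags;
}

/* Mirrors the post-purge update order: last page, first page, then the
 * interior pages only. */
static void
mark_purged_run(size_t pageind, size_t npages, size_t run_size, size_t flags,
    size_t flag_unzeroed)
{
	size_t i;

	large_set(pageind+npages-1, 0, flags);		/* last page */
	large_set(pageind, run_size, flags);		/* first page */
	for (i = 1; i < npages-1; i++)			/* interior only */
		internal_set(pageind+i, flag_unzeroed);
}

int
main(void)
{
	/* e.g. a 4-page run at page 8 that purging left unzeroed: */
	mark_purged_run(8, 4, 4 << 12, CHUNK_MAP_UNZEROED, CHUNK_MAP_UNZEROED);
	return (0);
}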

src/pages.c

@@ -102,7 +102,7 @@ pages_commit_impl(void *addr, size_t size, bool commit)
 {
 
 #ifndef _WIN32
-	if (config_debug) {
+	if (false &&/*XXX*/ config_debug) {
		int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
		void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
		    MAP_FIXED, -1, 0);