Fix a chunk leak in chunk_alloc_mmap().

A missing 'else' in chunk_alloc_mmap() caused an extra chunk to be
allocated every time the optimistic alignment path was entered, since
the following block would always be executed immediately afterward.
This chunk leak caused no increase in physical memory usage, but virtual
memory could grow until resource exhaustion caused allocation failures.
Jason Evans committed 2010-01-27 18:27:09 -08:00
commit 4fb7f51337
parent 95833311f1
2 changed files with 13 additions and 2 deletions


@@ -848,11 +848,22 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
 
-	/* Deallocate chunk if it is now completely unused. */
+	/*
+	 * Deallocate chunk if it is now completely unused.  The bit
+	 * manipulation checks whether the first run is unallocated and extends
+	 * to the end of the chunk.
+	 */
 	if ((chunk->map[arena_chunk_header_npages].bits & (~PAGE_MASK |
 	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
 		arena_chunk_dealloc(arena, chunk);
 
+	/*
+	 * It is okay to do dirty page processing even if the chunk was
+	 * deallocated above, since in that case it is the spare.  Waiting
+	 * until after possible chunk deallocation to do dirty processing
+	 * allows for an old spare to be fully deallocated, thus decreasing the
+	 * chances of spuriously crossing the dirty page purging threshold.
+	 */
 	if (dirty) {
 		if (chunk->dirtied == false) {
 			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
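
The single-expression check in this hunk is dense, so here is a minimal sketch of why it works, assuming the map bit layout jemalloc used at the time (run size stored in the bits above PAGE_MASK, flags such as CHUNK_MAP_ALLOCATED in the low bits; the constant values below are illustrative, not the real headers):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative values; the real ones come from jemalloc's headers. */
#define PAGE_MASK		((size_t)0xfff)	/* assumes 4 KiB pages */
#define CHUNK_MAP_ALLOCATED	((size_t)0x1)

static bool
chunk_is_completely_unused(size_t mapbits, size_t arena_maxclass)
{
	/*
	 * Masking with (~PAGE_MASK | CHUNK_MAP_ALLOCATED) keeps the run size
	 * bits plus the ALLOCATED flag.  The result equals arena_maxclass
	 * (the size of a run spanning every usable page in the chunk) only
	 * when the first run is unallocated (flag clear) and extends to the
	 * end of the chunk.
	 */
	return ((mapbits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
	    arena_maxclass);
}

If the flag is set or the run is smaller, the masked value differs from arena_maxclass and the chunk is kept.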


@@ -184,7 +184,7 @@ chunk_alloc_mmap(size_t size)
 				    offset));
 			}
 		}
-	}
+	} else
 		ret = chunk_alloc_mmap_slow(size, false);
 
 	return (ret);