From 4fb7f513376c0bb73fa1e4e1e89966af9cb2b9ec Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Wed, 27 Jan 2010 18:27:09 -0800
Subject: [PATCH] Fix a chunk leak in chunk_alloc_mmap().

A missing 'else' in chunk_alloc_mmap() caused an extra chunk to be
allocated every time the optimistic alignment path was entered, since
the following block would always be executed immediately afterward.
This chunk leak caused no increase in physical memory usage, but
virtual memory could grow until resource exhaustion caused allocation
failures.
---
 jemalloc/src/jemalloc_arena.c      | 13 ++++++++++++-
 jemalloc/src/jemalloc_chunk_mmap.c |  2 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/jemalloc/src/jemalloc_arena.c b/jemalloc/src/jemalloc_arena.c
index fa84f664..e1e1b8f6 100644
--- a/jemalloc/src/jemalloc_arena.c
+++ b/jemalloc/src/jemalloc_arena.c
@@ -848,11 +848,22 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
 
-	/* Deallocate chunk if it is now completely unused. */
+	/*
+	 * Deallocate chunk if it is now completely unused.  The bit
+	 * manipulation checks whether the first run is unallocated and extends
+	 * to the end of the chunk.
+	 */
 	if ((chunk->map[arena_chunk_header_npages].bits & (~PAGE_MASK |
 	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
 		arena_chunk_dealloc(arena, chunk);
 
+	/*
+	 * It is okay to do dirty page processing even if the chunk was
+	 * deallocated above, since in that case it is the spare.  Waiting
+	 * until after possible chunk deallocation to do dirty processing
+	 * allows for an old spare to be fully deallocated, thus decreasing the
+	 * chances of spuriously crossing the dirty page purging threshold.
+	 */
 	if (dirty) {
 		if (chunk->dirtied == false) {
 			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
diff --git a/jemalloc/src/jemalloc_chunk_mmap.c b/jemalloc/src/jemalloc_chunk_mmap.c
index 8e2c8048..d5702f20 100644
--- a/jemalloc/src/jemalloc_chunk_mmap.c
+++ b/jemalloc/src/jemalloc_chunk_mmap.c
@@ -184,7 +184,7 @@ chunk_alloc_mmap(size_t size)
 			    offset));
 			}
 		}
-	}
+	} else
 		ret = chunk_alloc_mmap_slow(size, false);
 
 	return (ret);
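
Note (illustrative, not part of the upstream patch): the leak is the
classic dangling-statement shape, where an indented line reads as an
'else' branch but is actually unconditional, so both paths execute and
the first mapping is lost. A minimal sketch follows; the guard and the
optimistic-path helper are hypothetical stand-ins, since only
chunk_alloc_mmap_slow() appears in the patch context:

	/*
	 * Before the fix: the slow path always runs, so a chunk mapped
	 * on the optimistic path is overwritten in 'ret' and its virtual
	 * address range is leaked (never unmapped, never reused).
	 */
	if (optimistic_path_available) {	/* hypothetical guard */
		ret = optimistic_alloc(size);	/* hypothetical helper */
	}
		ret = chunk_alloc_mmap_slow(size, false);	/* always executed */

	/*
	 * After the fix: the slow path runs only when the optimistic
	 * path is skipped, so exactly one mapping is created.
	 */
	if (optimistic_path_available) {
		ret = optimistic_alloc(size);
	} else
		ret = chunk_alloc_mmap_slow(size, false);

This also explains the symptom described in the commit message: the
leaked chunks were mapped but never touched, so physical memory stayed
flat while virtual address space grew until allocation failed.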