Fix potential chunk leaks.

Move chunk_dalloc_arena()'s implementation into chunk_dalloc_wrapper(),
so that if the dalloc hook fails, proper decommit/purge/retain cascading
occurs.  This fixes three potential chunk leaks on OOM paths: one during
dss-based chunk allocation, one during chunk header commit (currently
relevant only on Windows), and one during rtree write (e.g. if rtree
node allocation fails).
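
For reference, the merged function now reads roughly as follows (a sketch
reconstructed from the diff to src/chunk.c below; the assertion block at the
top of the function is elided):

    void
    chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
        size_t size, bool zeroed, bool committed)
    {

            chunk_hooks_assure_initialized(arena, chunk_hooks);
            /* Try the dalloc hook first; false means it deallocated. */
            if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
                    return;
            /* The hook opted out; try to decommit, and purge if that fails. */
            if (committed) {
                    committed = chunk_hooks->decommit(chunk, size, 0, size,
                        arena->ind);
            }
            zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
                arena->ind);
            /* Retain the chunk rather than leaking it. */
            chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
                &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
    }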

Merge chunk_purge_arena() into chunk_purge_default() (refactor, no
change to functionality).
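
After the merge, the default purge hook reads roughly like this (a sketch;
the assertions are reconstructed, and pages_purge() is the existing
page-level purge primitive it delegates to):

    static bool
    chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
        unsigned arena_ind)
    {

            assert(chunk != NULL);
            assert(CHUNK_ADDR2BASE(chunk) == chunk);
            assert((offset & PAGE_MASK) == 0);
            assert(length != 0);
            assert((length & PAGE_MASK) == 0);

            /* Purge the page range in place. */
            return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
                length));
    }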
Author: Jason Evans
Date:   2016-03-30 18:36:04 -07:00
Commit: ce7c0f999b (parent 0bc716ae27)
5 changed files with 26 additions and 51 deletions

include/jemalloc/internal/chunk.h

@@ -62,12 +62,8 @@ void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
 void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool committed);
-void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool zeroed, bool committed);
 void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool committed);
-bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
-    size_t length);
+    void *chunk, size_t size, bool zeroed, bool committed);
 bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, size_t offset, size_t length);
 bool chunk_boot(void);

include/jemalloc/internal/private_symbols.txt

@@ -152,7 +152,6 @@ chunk_alloc_dss
 chunk_alloc_mmap
 chunk_alloc_wrapper
 chunk_boot
-chunk_dalloc_arena
 chunk_dalloc_cache
 chunk_dalloc_mmap
 chunk_dalloc_wrapper
@@ -172,7 +171,6 @@ chunk_npages
 chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
-chunk_purge_arena
 chunk_purge_wrapper
 chunk_register
 chunks_rtree

src/arena.c

@@ -602,8 +602,8 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		/* Commit header. */
 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
 		    LG_PAGE, arena->ind)) {
-			chunk_dalloc_wrapper(arena, chunk_hooks,
-			    (void *)chunk, chunksize, *commit);
+			chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+			    chunksize, *zero, *commit);
 			chunk = NULL;
 		}
 	}
@@ -614,7 +614,7 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
 			    LG_PAGE, arena->ind);
 		}
 		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
-		    chunksize, *commit);
+		    chunksize, *zero, *commit);
 		chunk = NULL;
 	}
@@ -1010,7 +1010,7 @@ arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		malloc_mutex_unlock(&arena->lock);
 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
-		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+		chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
 		    true);
 		err = true;
 	}
@@ -1036,8 +1036,8 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
 	}
 	arena_nactive_add(arena, udiff >> LG_PAGE);
-	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, true) == NULL);
+	err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
+	    zero, true) == NULL);
 	malloc_mutex_unlock(&arena->lock);
 	if (err) {
 		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
@@ -1045,7 +1045,7 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
 	    cdiff);
 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
-		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+		chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
 		    true);
 		err = true;
 	}
@@ -1699,7 +1699,7 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
 			extent_node_dirty_remove(chunkselm);
 			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
-			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+			chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
 			    zeroed, committed);
 		} else {
 			arena_chunk_t *chunk =

src/chunk.c

@@ -425,8 +425,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	arena_t *arena;

 	arena = chunk_arena_get(arena_ind);
-	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
-	    commit, arena->dss_prec);
+	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
+	    arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
@@ -579,8 +579,18 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	arena_maybe_purge(arena);
 }

+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+
+	if (!have_dss || !chunk_in_dss(chunk))
+		return (chunk_dalloc_mmap(chunk, size));
+	return (true);
+}
+
 void
-chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
     size_t size, bool zeroed, bool committed)
 {
@@ -604,27 +614,6 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
 }

-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
-    unsigned arena_ind)
-{
-
-	if (!have_dss || !chunk_in_dss(chunk))
-		return (chunk_dalloc_mmap(chunk, size));
-	return (true);
-}
-
-void
-chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool committed)
-{
-
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
-	chunk_hooks->dalloc(chunk, size, committed, arena->ind);
-	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
-}
-
 static bool
 chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
     unsigned arena_ind)
@@ -643,8 +632,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
 	    length));
 }

-bool
-chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
 {

 	assert(chunk != NULL);
@@ -657,15 +647,6 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
 	    length));
 }

-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
-    unsigned arena_ind)
-{
-
-	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
-	    length));
-}
-
 bool
 chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
     size_t size, size_t offset, size_t length)
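
One practical consequence of the merge above: a custom dalloc hook that opts
out by returning true (for example, to keep virtual mappings alive) no longer
risks leaking the chunk; the wrapper now falls through to decommit/purge and
records the chunk in the retained trees. A minimal sketch of such a hook,
assuming the chunk_hooks_t interface documented in the jemalloc 4.x manual
(the hook name here is illustrative):

    /* Decline deallocation; jemalloc decommits/purges and retains the chunk. */
    static bool
    my_chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
    {

            return (true);	/* true == opted out; false == deallocated. */
    }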

src/chunk_dss.c

@@ -136,7 +136,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 				    CHUNK_HOOKS_INITIALIZER;
 				chunk_dalloc_wrapper(arena,
 				    &chunk_hooks, cpad, cpad_size,
-				    true);
+				    false, true);
 			}
 			if (*zero) {
 				JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(