Merge branch.

Jason Evans 2016-06-08 11:41:24 -07:00
commit 3de0353352
9 changed files with 96 additions and 38 deletions


@@ -4,6 +4,17 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.2.1 (June 8, 2016)
Bug fixes:
- Fix bootstrapping issues for configurations that require allocation during
tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
- Fix gettimeofday() version of nstime_update(). (@ronawho)
- Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
- Fix potential VM map fragmentation regression. (@jasone)
- Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
- Fix heap profiling context leaks in reallocation edge cases. (@jasone)
* 4.2.0 (May 12, 2016)
New features:


@@ -754,7 +754,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize + large_pad + alignment <= arena_maxrun)
if (usize + large_pad + alignment - PAGE <= arena_maxrun)
return (usize);
}
@@ -784,7 +784,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment < usize) {
if (usize + alignment - PAGE < usize) {
/* size_t overflow. */
return (0);
}

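The "- PAGE" adjustments in sa2u() above, and again in arena_palloc_large() and chunk_alloc_mmap_slow() below, rely on the same observation: the run or mapping being sized is itself returned PAGE-aligned, so the worst-case padding needed to reach an address aligned to alignment is alignment - PAGE rather than a full alignment. A standalone sketch of that arithmetic (not jemalloc code; the PAGE value is assumed to be 4 KiB purely for illustration):

/*
 * Standalone sketch: for a PAGE-aligned base address, the largest padding
 * ever needed to reach an alignment-aligned address is alignment - PAGE.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)		/* assumed page size */

int
main(void)
{
	uintptr_t alignment = 8 * PAGE;	/* any power-of-two multiple of PAGE */
	uintptr_t worst_pad = 0;
	uintptr_t base;

	/* Page allocators hand back PAGE-aligned addresses, so only
	 * page-granular base addresses are possible. */
	for (base = PAGE; base < 64 * alignment; base += PAGE) {
		uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
		uintptr_t pad = aligned - base;

		if (pad > worst_pad)
			worst_pad = pad;
	}
	assert(worst_pad == alignment - PAGE);
	printf("worst-case padding: %zu bytes\n", (size_t)worst_pad);
	return (0);
}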

@@ -513,6 +513,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}

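The added prof_alloc_rollback() call undoes a speculatively prepared sample context before prof_realloc() discards it as unsampled; without the rollback the prepared context is never released, which appears to be the reallocation edge case called out in the ChangeLog. A minimal sketch of the prepare/rollback idea, using hypothetical names rather than the jemalloc prof API:

/*
 * Hypothetical prepare/rollback pair: a context prepared in anticipation of
 * sampling must be rolled back if the caller later decides not to sample;
 * overwriting the pointer with a "not sampled" sentinel alone leaks it.
 */
#include <stdlib.h>

typedef struct { int refs; } sample_ctx_t;

static sample_ctx_t *
sample_prep(void)
{
	sample_ctx_t *ctx = malloc(sizeof(*ctx));

	if (ctx != NULL)
		ctx->refs = 1;
	return (ctx);
}

static void
sample_rollback(sample_ctx_t *ctx)
{
	if (ctx != NULL && --ctx->refs == 0)
		free(ctx);
}

int
main(void)
{
	sample_ctx_t *tctx = sample_prep();
	int sampled = 0;	/* e.g. the actual usize stayed below the threshold */

	if (!sampled) {
		sample_rollback(tctx);	/* the role of the added call above */
		tctx = NULL;		/* analogous to (prof_tctx_t *)1U */
	}
	(void)tctx;
	return (0);
}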

@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment;
alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_large(tsdn, arena, alloc_size, false);


@@ -421,15 +421,11 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
}
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
@@ -440,6 +436,20 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
return (ret);
}
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
zero, commit));
}
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
@@ -472,14 +482,23 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
if (ret == NULL) {
ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
commit, arena->ind);
if (ret == NULL)
return (NULL);
if (chunk_hooks->alloc == chunk_alloc_default) {
/* Call directly to propagate tsdn. */
ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
size, alignment, zero, commit);
} else {
ret = chunk_hooks->alloc(new_addr, size, alignment,
zero, commit, arena->ind);
}
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
if (ret == NULL)
return (NULL);
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
}
return (ret);
}
@@ -590,20 +609,31 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_maybe_purge(tsdn, arena);
}
static bool
chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
{
if (!have_dss || !chunk_in_dss(tsdn, chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
tsdn_t *tsdn;
if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
tsdn = tsdn_fetch();
return (chunk_dalloc_default_impl(tsdn, chunk, size));
}
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
{
bool err;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
@@ -612,7 +642,13 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
if (chunk_hooks->dalloc == chunk_dalloc_default) {
/* Call directly to propagate tsdn. */
err = chunk_dalloc_default_impl(tsdn, chunk, size);
} else
err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (!err)
return;
/* Try to decommit; purge if that fails. */
if (committed) {
@@ -681,26 +717,34 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
}
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
{
if (!maps_coalesce)
return (true);
if (have_dss) {
tsdn_t *tsdn = tsdn_fetch();
if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
chunk_b))
return (true);
}
return (false);
}
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{
tsdn_t *tsdn;
tsdn = tsdn_fetch();
return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
}
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
sizeof(rtree_node_elm_t)));
}

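The chunk.c changes above split each default hook into a *_impl variant that takes tsdn_t * (and arena_t * where needed) explicitly; the wrappers then compare the installed hook against the default and, on a match, "call directly to propagate tsdn" rather than letting the hook re-fetch thread-specific data, and chunks_rtree_node_alloc() switches from tsdn_fetch() to TSDN_NULL. This lines up with the ChangeLog item about allocation during tsd initialization. A minimal sketch of the dispatch pattern, with hypothetical names rather than the jemalloc API:

/*
 * Hypothetical sketch: the public hook signature cannot carry a context
 * pointer, so internal callers detect the default hook and call an _impl
 * function that takes the context explicitly instead of re-fetching it.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } ctx_t;	/* stands in for tsdn_t */
typedef void *(*alloc_hook_t)(size_t size, unsigned arena_ind);

static ctx_t global_ctx;

static ctx_t *
ctx_fetch(void)
{
	/* In jemalloc this is tsdn_fetch(); calling it from inside the
	 * allocation path can recurse into tsd initialization, which may
	 * itself need to allocate (see the ChangeLog bootstrapping fix). */
	return (&global_ctx);
}

static void *
alloc_default_impl(ctx_t *ctx, size_t size)
{
	(void)ctx;
	return (malloc(size));
}

static void *
alloc_default(size_t size, unsigned arena_ind)
{
	(void)arena_ind;
	return (alloc_default_impl(ctx_fetch(), size));
}

static void *
alloc_wrapper(ctx_t *ctx, alloc_hook_t hook, size_t size, unsigned arena_ind)
{
	if (hook == alloc_default) {
		/* Call directly to propagate the caller's context. */
		return (alloc_default_impl(ctx, size));
	}
	return (hook(size, arena_ind));
}

int
main(void)
{
	ctx_t ctx;
	void *p = alloc_wrapper(&ctx, alloc_default, 64, 0);

	printf("%s\n", p != NULL ? "ok" : "fail");
	free(p);
	return (0);
}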

@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
void *ret;
size_t alloc_size;
alloc_size = size + alignment;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);


@@ -262,19 +262,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
* that it is possible to make correct junk/zero fill decisions below.
* Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
* update extent's zeroed field, and zero as necessary.
*/
is_zeroed_chunk = zero;
is_zeroed_chunk = false;
if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
is_zeroed_chunk);
huge_node_reset(tsdn, ptr, node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);

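The rewritten comment and the is_zeroed_chunk = false initialization above change the direction of information flow: rather than telling the chunk code what the caller asked for, the caller now asks whether the trailing memory actually came back zeroed, folds that into the extent's zeroed flag, and zero-fills only when needed. A standalone sketch of that flag handling, with hypothetical names rather than the jemalloc API:

/*
 * Hypothetical sketch: initialize the flag to false, let the callee report
 * whether the new memory is already zeroed, then combine the report with
 * existing state and zero explicitly only when the request demands it.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *
grow_alloc(size_t size, bool *is_zeroed)
{
	void *p = calloc(1, size);

	*is_zeroed = (p != NULL);	/* calloc-backed memory is zeroed */
	return (p);
}

int
main(void)
{
	bool extent_zeroed = true;	/* zeroed state tracked for the extent */
	bool want_zero = true;		/* caller requested zeroed memory */
	bool is_zeroed_chunk = false;	/* ask the callee; do not assume */
	size_t grow = 4096;
	void *tail = grow_alloc(grow, &is_zeroed_chunk);

	if (tail == NULL)
		return (1);
	extent_zeroed = extent_zeroed && is_zeroed_chunk;
	if (want_zero && !is_zeroed_chunk)
		memset(tail, 0, grow);	/* zero only what is necessary */
	(void)extent_zeroed;
	free(tail);
	return (0);
}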

@@ -1739,7 +1739,7 @@ je_calloc(size_t num, size_t size)
ret = ialloc_body(num_size, true, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
}
return (ret);
@@ -2222,7 +2222,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_active = prof_active_get_unlocked();
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
alignment, zero, tcache, arena, tctx);
@@ -2231,7 +2231,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tcache, arena);
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
prof_alloc_rollback(tsd, tctx, false);
return (NULL);
}
@@ -2246,7 +2246,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
*/
*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
}
prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
old_usize, old_tctx);
return (p);


@@ -128,9 +128,11 @@ nstime_update(nstime_t *time)
time->ns = ts.tv_sec * BILLION + ts.tv_nsec;
}
#else
{
struct timeval tv;
gettimeofday(&tv, NULL);
time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
}
#endif
/* Handle non-monotonic clocks. */
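For builds where no usable monotonic clock is available, the #else branch above falls back to gettimeofday(). A standalone sketch of that fallback computation (not jemalloc code; the BILLION constant is assumed):

/*
 * Sketch of the gettimeofday() fallback: widen both components to 64 bits
 * and combine seconds and microseconds into a nanosecond count.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define BILLION	UINT64_C(1000000000)

int
main(void)
{
	struct timeval tv;
	uint64_t ns;

	gettimeofday(&tv, NULL);
	ns = (uint64_t)tv.tv_sec * BILLION + (uint64_t)tv.tv_usec * 1000;
	printf("%llu ns since the epoch\n", (unsigned long long)ns);
	return (0);
}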