Merge branch.

commit 3de0353352

ChangeLog | 11
@@ -4,6 +4,17 @@ brevity. Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 4.2.1 (June 8, 2016)
+
+  Bug fixes:
+  - Fix bootstrapping issues for configurations that require allocation during
+    tsd initialization (e.g. --disable-tls).  (@cferris1000, @jasone)
+  - Fix gettimeofday() version of nstime_update().  (@ronawho)
+  - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper().  (@ronawho)
+  - Fix potential VM map fragmentation regression.  (@jasone)
+  - Fix opt_zero-triggered in-place huge reallocation zeroing.  (@jasone)
+  - Fix heap profiling context leaks in reallocation edge cases.  (@jasone)
+
 * 4.2.0 (May 12, 2016)
 
   New features:
@@ -754,7 +754,7 @@ sa2u(size_t size, size_t alignment)
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
-		if (usize + large_pad + alignment <= arena_maxrun)
+		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}
 
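A note on this hunk, the overflow check in the next one, and the matching arena_palloc_large() and chunk_alloc_mmap_slow() changes below: run and chunk base addresses are always page-aligned, so the worst-case padding needed to reach an alignment boundary from such a base is alignment - PAGE, not alignment. Reserving the full alignment as slack made over-size runs one page bigger than necessary, which plausibly is the "potential VM map fragmentation regression" entry in the ChangeLog. A standalone sketch of the arithmetic (the PAGE value is assumed for illustration; this is not jemalloc code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE ((uintptr_t)4096) /* assumed page size, illustration only */

    int
    main(void)
    {
        uintptr_t alignment = (uintptr_t)1 << 20; /* 1 MiB */
        uintptr_t base, worst = 0;

        /* Try every page-aligned base within one alignment period. */
        for (base = 0; base < alignment; base += PAGE) {
            uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);

            if (aligned - base > worst)
                worst = aligned - base;
        }
        /* The padding never exceeds alignment - PAGE. */
        assert(worst == alignment - PAGE);
        printf("worst-case padding: %zu bytes\n", (size_t)worst);
        return (0);
    }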
@@ -784,7 +784,7 @@ sa2u(size_t size, size_t alignment)
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
-	if (usize + alignment < usize) {
+	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
@@ -513,6 +513,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
+			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}
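This added rollback looks like part of the "heap profiling context leaks in reallocation edge cases" fix from the ChangeLog: when prof_realloc() decides to treat the object as unsampled after all, the freshly prepared tctx would otherwise be overwritten by the (uintptr_t)1U sentinel and its reference never released; prof_alloc_rollback() releases it first.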
@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
		return (NULL);
 
	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment;
+	alloc_size = usize + large_pad + alignment - PAGE;
 
	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
src/chunk.c | 88
@@ -421,15 +421,11 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
 }
 
 static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool *commit, unsigned arena_ind)
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
 {
	void *ret;
-	tsdn_t *tsdn;
-	arena_t *arena;
 
-	tsdn = tsdn_fetch();
-	arena = chunk_arena_get(tsdn, arena_ind);
	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
@@ -440,6 +436,20 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
	return (ret);
 }
 
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+	arena_t *arena;
+
+	tsdn = tsdn_fetch();
+	arena = chunk_arena_get(tsdn, arena_ind);
+
+	return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+	    zero, commit));
+}
+
 static void *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
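The chunk.c changes all follow one pattern: each default hook is split into a *_impl function that takes tsdn_t * explicitly, plus a thin wrapper keeping the fixed chunk_hooks_t signature that calls tsdn_fetch() itself. Internal callers that already hold a tsdn can then invoke the _impl directly instead of re-reading thread-local state on every chunk operation. A reduced sketch of that shape (hypothetical names; ctx_t stands in for tsdn_t; not jemalloc code):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { int id; } ctx_t; /* stand-in for tsdn_t */

    static ctx_t *
    ctx_fetch(void)
    {
        /* In jemalloc this is tsdn_fetch(): a thread-local lookup. */
        static ctx_t ctx = { 42 };
        return (&ctx);
    }

    /* The _impl does the work and takes its context from the caller. */
    static int
    op_default_impl(ctx_t *ctx, size_t size)
    {
        return (ctx->id + (int)size);
    }

    /* The hook keeps the fixed signature and fetches the context itself. */
    static int
    op_default(size_t size)
    {
        return (op_default_impl(ctx_fetch(), size));
    }

    int
    main(void)
    {
        ctx_t *ctx = ctx_fetch();

        /* Internal path: context already in hand, no second lookup. */
        printf("%d\n", op_default_impl(ctx, 1));
        /* External path, through the fixed-signature hook. */
        printf("%d\n", op_default(1));
        return (0);
    }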
@@ -472,14 +482,23 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
-		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
-		    commit, arena->ind);
+		if (chunk_hooks->alloc == chunk_alloc_default) {
+			/* Call directly to propagate tsdn. */
+			ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+			    size, alignment, zero, commit);
+		} else {
+			ret = chunk_hooks->alloc(new_addr, size, alignment,
+			    zero, commit, arena->ind);
+		}
+
		if (ret == NULL)
			return (NULL);
+
+		if (config_valgrind && chunk_hooks->alloc !=
+		    chunk_alloc_default)
+			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	}
 
-	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
 }
 
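Two coordinated changes in chunk_alloc_wrapper(): first, the wrapper now detects the default hook by comparing function pointers, and in that case calls chunk_alloc_default_impl() with the tsdn it already holds (the "call directly to propagate tsdn" comment) instead of letting the hook re-fetch it. Second, the JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED marking moves inside the ret == NULL branch, so it is applied only to memory freshly obtained from a user hook and no longer to chunks recycled via chunk_alloc_retained(); this appears to be the chunk_alloc_wrapper() half of the ChangeLog's Valgrind regression fix.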
@@ -590,20 +609,31 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	arena_maybe_purge(tsdn, arena);
 }
 
+static bool
+chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
+{
+
+	if (!have_dss || !chunk_in_dss(tsdn, chunk))
+		return (chunk_dalloc_mmap(chunk, size));
+	return (true);
+}
+
 static bool
 chunk_dalloc_default(void *chunk, size_t size, bool committed,
     unsigned arena_ind)
 {
+	tsdn_t *tsdn;
 
-	if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
-		return (chunk_dalloc_mmap(chunk, size));
-	return (true);
+	tsdn = tsdn_fetch();
+
+	return (chunk_dalloc_default_impl(tsdn, chunk, size));
 }
 
 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
+	bool err;
 
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
@@ -612,7 +642,13 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	/* Try to deallocate. */
-	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+	if (chunk_hooks->dalloc == chunk_dalloc_default) {
+		/* Call directly to propagate tsdn. */
+		err = chunk_dalloc_default_impl(tsdn, chunk, size);
+	} else
+		err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+	if (!err)
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
@@ -681,26 +717,34 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
 }
 
 static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
-    bool committed, unsigned arena_ind)
+chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
 {
 
	if (!maps_coalesce)
		return (true);
-	if (have_dss) {
-		tsdn_t *tsdn = tsdn_fetch();
-		if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
-			return (true);
-	}
+	if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
+	    chunk_b))
+		return (true);
 
	return (false);
 }
 
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+
+	tsdn = tsdn_fetch();
+
+	return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
+}
+
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
 
-	return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
+	return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
	    sizeof(rtree_node_elm_t)));
 }
 
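The chunks_rtree_node_alloc() change is the odd one out: instead of gaining an _impl, it stops fetching a tsdn altogether and passes TSDN_NULL to base_alloc(). A plausible reading, consistent with the ChangeLog's bootstrapping fix for configurations that require allocation during tsd initialization (e.g. --disable-tls), is that rtree nodes can be allocated while tsd is still being set up, where tsdn_fetch() itself may recurse into the allocator; passing TSDN_NULL avoids touching thread-local state on this path. Treat this as an inference from the diff, not a statement from the commit.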
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
	void *ret;
	size_t alloc_size;
 
-	alloc_size = size + alignment;
+	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
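The same reasoning applies to chunk_alloc_mmap_slow(): mmap() returns page-aligned addresses, so a mapping of size + alignment - PAGE bytes always contains an alignment-aligned range of size bytes. A self-contained POSIX sketch of the over-allocate-and-trim strategy this function uses (illustrative only, not the jemalloc implementation):

    #define _DEFAULT_SOURCE
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t size = (size_t)1 << 21;      /* 2 MiB request... */
        size_t alignment = (size_t)1 << 21; /* ...at 2 MiB alignment */
        size_t alloc_size = size + alignment - page;
        uintptr_t base, aligned;
        void *addr;

        assert(alloc_size >= size); /* beware size_t wrap-around */
        addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(addr != MAP_FAILED);

        base = (uintptr_t)addr;
        aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);

        /* Trim the unaligned head and the unused tail. */
        if (aligned != base)
            munmap(addr, aligned - base);
        if (alloc_size - (aligned - base) > size)
            munmap((void *)(aligned + size),
                alloc_size - (aligned - base) - size);

        printf("aligned chunk at %p\n", (void *)aligned);
        munmap((void *)aligned, size);
        return (0);
    }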
src/huge.c | 10
@@ -262,19 +262,20 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
	/*
-	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
-	 * that it is possible to make correct junk/zero fill decisions below.
+	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+	 * update extent's zeroed field, and zero as necessary.
	 */
-	is_zeroed_chunk = zero;
+	is_zeroed_chunk = false;
 
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);
 
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
-	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
+	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+	    is_zeroed_chunk);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
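This change turns is_zeroed_chunk from an input into a pure output. Previously it was seeded with the caller's zero request, so arena_chunk_ralloc_huge_expand() could report the trailing memory as already zeroed when it was not, skipping a required zero fill; that matches the ChangeLog's "opt_zero-triggered in-place huge reallocation zeroing" fix. Starting from false makes the flag mean only "the new trailing chunk is known to be zeroed", and the added extent_node_zeroed_set() call merges it conservatively: the extent stays marked zeroed only if both the old extent and the new trailing chunk are.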
@@ -1739,7 +1739,7 @@ je_calloc(size_t num, size_t size)
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}
 
	return (ret);
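In je_calloc() the final argument of JEMALLOC_VALGRIND_MALLOC flips to true. That argument corresponds to the is_zeroed flag of Valgrind's VALGRIND_MALLOCLIKE_BLOCK client request: when set, memcheck treats the block's contents as defined, which is what calloc() guarantees; with it false, reading the zeroed memory raised spurious uninitialised-value errors. This is presumably the calloc() half of the ChangeLog's Valgrind regression fix. A minimal illustration of the client request (the wrapper function is hypothetical; the macro is real Valgrind API):

    #include <stddef.h>
    #include <valgrind/valgrind.h>

    /*
     * Register a custom-allocated block with Valgrind; is_zeroed != 0 marks
     * its contents as defined, matching calloc() semantics.
     */
    void
    report_alloc(void *ptr, size_t usize, size_t redzone, int is_zeroed)
    {
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, redzone, is_zeroed);
    }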
@@ -2222,7 +2222,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
 
	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
-	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
+	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
@@ -2231,7 +2231,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tsd, tctx, true);
+		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}
 
@@ -2246,7 +2246,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
-	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
+	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);
 
	return (p);
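The three boolean flips in irallocx_prof() travel together: prof_alloc_prep(), prof_alloc_rollback(), and prof_realloc() each take a flag describing whether the allocation site was already counted as an update, and all three call sites now agree on false. Since irallocx_prof() allocates a new object rather than updating one in place, passing true misstated the profiler's bookkeeping; per the ChangeLog this leaked heap profiling contexts in reallocation edge cases. The flag's precise name is not visible in this diff, so the reading above leans on the ChangeLog entry.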
@@ -128,9 +128,11 @@ nstime_update(nstime_t *time)
		time->ns = ts.tv_sec * BILLION + ts.tv_nsec;
	}
 #else
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+	{
+		struct timeval tv;
+		gettimeofday(&tv, NULL);
+		time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+	}
 #endif
 
	/* Handle non-monotonic clocks. */
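Finally, the gettimeofday() branch of nstime_update() gains its own braces. The sibling preprocessor branches already open blocks for their locals, and by this point in the function a statement has already executed, so the bare struct timeval tv; declaration was a declaration after a statement, which pre-C99 compilers reject; block-scoping it is the portable fix. (This reading is inferred from the diff; the ChangeLog only says the gettimeofday() version was fixed.) A small illustration of the idiom:

    #include <stdio.h>
    #include <sys/time.h>

    static long long
    now_ns(void)
    {
        long long ns;

        ns = 0; /* a statement has already executed here... */
        {
            /* ...so in C89 a new declaration needs its own block. */
            struct timeval tv;

            gettimeofday(&tv, NULL);
            ns = (long long)tv.tv_sec * 1000000000LL +
                (long long)tv.tv_usec * 1000LL;
        }
        return (ns);
    }

    int
    main(void)
    {
        printf("%lld\n", now_ns());
        return (0);
    }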