Fix chunk cache races.

These regressions were introduced by
ee41ad409a (Integrate whole chunks into
unused dirty page purging machinery.).
This commit is contained in:
Jason Evans 2015-02-18 16:40:53 -08:00
parent 738e089a2e
commit 99bd94fb65
5 changed files with 263 additions and 139 deletions

View File

@ -399,6 +399,7 @@ void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
index_t binind, uint64_t prof_accumbytes);

View File

@ -39,16 +39,21 @@ extern size_t chunk_npages;
bool chunk_register(const void *chunk, const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
size_t size, size_t alignment, bool *zero);
void *chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool dalloc_node);
void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
bool *zero, unsigned arena_ind);
void *chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc,
void *new_addr, size_t size, size_t alignment, bool *zero);
void chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
bool zeroed);
void chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size);
void chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size,
bool zeroed);
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
void chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed);
void chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc,
void *chunk, size_t size);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);

View File

@ -53,6 +53,7 @@ arena_mapbitsp_read
arena_mapbitsp_write
arena_maxclass
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
@ -124,14 +125,18 @@ bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_arena
chunk_alloc_cache
chunk_alloc_base
chunk_alloc_default
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_default
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_postfork_child
@ -147,7 +152,6 @@ chunk_postfork_parent
chunk_prefork
chunk_record
chunk_register
chunk_unmap
chunks_rtree
chunksize
chunksize_mask

View File

@ -20,7 +20,6 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned);
@ -427,27 +426,53 @@ arena_chunk_init_spare(arena_t *arena)
return (chunk);
}
/*
 * Initialize the chunk's embedded extent node and register the chunk.
 * Returns true on registration failure, in which case the caller is
 * responsible for deallocating the chunk.
 */
static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{
/* Record owning arena, base address, size, and zeroed state in the node. */
extent_node_init(&chunk->node, arena, chunk, chunksize, zero);
/* Flag the node as backing an arena chunk. */
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(chunk, &chunk->node));
}
/*
 * Slow path for arena chunk allocation: call out to the arena's
 * (possibly user-overridden) chunk_alloc hook.  Called with arena->lock
 * held; the lock is dropped around the hook invocation and reacquired
 * before returning.  Returns NULL on allocation or registration failure.
 */
static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, bool *zero)
{
arena_chunk_t *chunk;
/* Snapshot the hooks while the lock is still held. */
chunk_alloc_t *chunk_alloc = arena->chunk_alloc;
chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_alloc, NULL,
chunksize, chunksize, zero);
/* On registration failure, give the chunk back and report failure. */
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)chunk,
chunksize);
chunk = NULL;
}
malloc_mutex_lock(&arena->lock);
return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero)
{
arena_chunk_t *chunk;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
arena->ind, NULL, chunksize, chunksize, zero);
if (chunk != NULL) {
extent_node_init(&chunk->node, arena, chunk, chunksize, *zero);
extent_node_achunk_set(&chunk->node, true);
if (chunk_register(chunk, &chunk->node)) {
chunk_dalloc((void *)chunk, chunksize, arena->ind);
if (likely(arena->chunk_alloc == chunk_alloc_default)) {
chunk = chunk_alloc_cache(arena, NULL, chunksize, chunksize,
zero, true);
if (chunk != NULL && arena_chunk_register(arena, chunk,
*zero)) {
chunk_dalloc_cache(arena, chunk, chunksize);
return (NULL);
}
} else
chunk = NULL;
}
}
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
chunk = arena_chunk_alloc_internal_hard(arena, zero);
if (config_stats && chunk != NULL) {
arena->stats.mapped += chunksize;
arena->stats.metadata_mapped += (map_bias << LG_PAGE);
@ -553,11 +578,19 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk_deregister(spare, &spare->node);
chunk_dalloc((void *)spare, chunksize, arena->ind);
chunk_dalloc = arena->chunk_dalloc;
if (likely(chunk_dalloc == chunk_dalloc_default))
chunk_dalloc_cache(arena, (void *)spare, chunksize);
else {
malloc_mutex_unlock(&arena->lock);
chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)spare,
chunksize);
malloc_mutex_lock(&arena->lock);
}
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
@ -661,28 +694,14 @@ arena_node_dalloc(arena_t *arena, extent_node_t *node)
malloc_mutex_unlock(&arena->node_cache_mtx);
}
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)
static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
size_t usize, size_t alignment, bool *zero, size_t csize)
{
void *ret;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
size_t csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
/* Optimistically update stats prior to unlocking. */
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
arena->nactive += (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
csize, alignment, zero);
ret = chunk_alloc_wrapper(arena, chunk_alloc, NULL, csize, alignment,
zero);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
@ -692,12 +711,42 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
}
arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
if (config_stats)
stats_cactive_add(usize);
return (ret);
}
/*
 * Allocate the chunk(s) backing a huge allocation.  Stats and the active
 * page count are updated optimistically under arena->lock before the
 * allocation is attempted; on failure the slow path reverts them
 * (see arena_chunk_alloc_huge_hard()).
 */
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)
{
void *ret;
chunk_alloc_t *chunk_alloc;
size_t csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
arena->nactive += (usize >> LG_PAGE);
/*
 * Fast path: try the arena's chunk cache under the lock, but only when
 * the default allocation hook is in effect.
 */
chunk_alloc = arena->chunk_alloc;
if (likely(chunk_alloc == chunk_alloc_default)) {
ret = chunk_alloc_cache(arena, NULL, csize, alignment, zero,
true);
} else
ret = NULL;
malloc_mutex_unlock(&arena->lock);
/* Cache miss (or non-default hook): take the slow path, unlocked. */
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(arena, chunk_alloc, usize,
alignment, zero, csize);
}
if (config_stats && ret != NULL)
stats_cactive_add(usize);
return (ret);
}
@ -705,7 +754,9 @@ void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
chunk_dalloc_t *chunk_dalloc;
size_t csize;
csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
@ -714,8 +765,14 @@ arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
stats_cactive_sub(usize);
}
arena->nactive -= (usize >> LG_PAGE);
if (likely(chunk_dalloc == chunk_dalloc_default)) {
chunk_dalloc_cache(arena, chunk, csize);
malloc_mutex_unlock(&arena->lock);
chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
} else {
malloc_mutex_unlock(&arena->lock);
chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize);
}
}
void
@ -747,12 +804,10 @@ void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
{
chunk_dalloc_t *chunk_dalloc;
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) {
@ -761,52 +816,81 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
}
}
arena->nactive -= udiff >> LG_PAGE;
malloc_mutex_unlock(&arena->lock);
if (cdiff != 0) {
chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)),
cdiff, arena->ind);
chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
if (likely(chunk_dalloc == chunk_dalloc_default)) {
chunk_dalloc_cache(arena, nchunk, cdiff);
malloc_mutex_unlock(&arena->lock);
} else {
malloc_mutex_unlock(&arena->lock);
chunk_dalloc_wrapper(arena, chunk_dalloc, nchunk,
cdiff);
}
} else
malloc_mutex_unlock(&arena->lock);
}
/*
 * Slow path for in-place expansion of a huge allocation: call the
 * arena's chunk_alloc hook without arena->lock held.  The caller has
 * already optimistically updated stats and nactive; on failure those
 * updates are reverted here.  Returns true on error.
 */
bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff,
size_t cdiff)
{
bool err;
err = (chunk_alloc_wrapper(arena, chunk_alloc, nchunk, cdiff, chunksize,
zero) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
arena->nactive -= (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
}
return (err);
}
bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
size_t usize, bool *zero)
{
bool err;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
malloc_mutex_lock(&arena->lock);
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
/* Optimistically update stats. */
if (config_stats) {
/* Optimistically update stats prior to unlocking. */
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
arena->nactive += (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
(void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff,
chunksize, zero) == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena,
oldsize, usize);
arena->stats.mapped -= cdiff;
}
arena->nactive -= (udiff >> LG_PAGE);
chunk_alloc = arena->chunk_alloc;
if (likely(chunk_alloc == chunk_alloc_default)) {
err = (chunk_alloc_cache(arena, nchunk, cdiff, chunksize, zero,
true) == NULL);
} else
err = true;
malloc_mutex_unlock(&arena->lock);
return (true);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(arena, chunk_alloc,
oldsize, usize, zero, nchunk, udiff, cdiff);
}
if (config_stats)
if (config_stats && !err)
stats_cactive_add(udiff);
return (false);
return (err);
}
static arena_run_t *
@ -909,7 +993,7 @@ arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
return (arena_run_alloc_small_helper(arena, size, binind));
}
JEMALLOC_INLINE_C void
void
arena_maybe_purge(arena_t *arena)
{
size_t threshold;
@ -999,39 +1083,25 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
runselm_next = qr_next(runselm, rd_link);
if (runselm == &chunkselm->runs_dirty) {
extent_node_t *chunkselm_next, *tnode;
void *addr;
size_t size;
bool zeroed, zero;
extent_node_t *chunkselm_next;
bool zero;
UNUSED void *chunk;
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Cache contents of chunkselm prior to it being
* destroyed as a side effect of allocating the chunk.
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
addr = extent_node_addr_get(chunkselm);
size = extent_node_size_get(chunkselm);
zeroed = extent_node_zeroed_get(chunkselm);
/* Allocate. */
zero = false;
chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
arena->ind);
assert(chunk == addr);
assert(zero == zeroed);
/*
* Create a temporary node to link into the ring of
* stashed allocations. OOM shouldn't be possible
* because chunk allocation just cached a node.
*/
tnode = arena_node_alloc(arena);
assert(tnode != NULL);
/* Stash. */
extent_node_init(tnode, arena, addr, size, zeroed);
extent_node_dirty_linkage_init(tnode);
extent_node_dirty_insert(tnode, purge_runs_sentinel,
chunk = chunk_alloc_cache(arena,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
npages = size >> LG_PAGE;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
chunkselm = chunkselm_next;
} else {
arena_chunk_t *chunk =
@ -1170,7 +1240,7 @@ arena_unstash_purged(arena_t *arena,
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
chunk_unmap(arena, addr, size, zeroed);
chunk_dalloc_arena(arena, addr, size, zeroed);
} else {
arena_run_t *run = &runselm->run;
qr_remove(runselm, rd_link);

View File

@ -65,7 +65,7 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
static void *
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
size_t alignment, bool *zero)
size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
extent_node_t *node;
@ -74,6 +74,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
bool zeroed;
assert(new_addr == NULL || alignment == chunksize);
assert(dalloc_node || new_addr != NULL);
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
@ -129,7 +130,8 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
}
malloc_mutex_unlock(&arena->chunks_mtx);
if (node != NULL)
assert(!dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
if (*zero) {
if (!zeroed)
@ -153,8 +155,8 @@ chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
void *ret;
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero)) !=
NULL)
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero,
true)) != NULL)
return (ret);
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
return (ret);
@ -177,11 +179,6 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* cache. */
if ((ret = chunk_recycle(arena, &arena->chunks_szad_cache,
&arena->chunks_ad_cache, true, new_addr, size, alignment, zero)) !=
NULL)
return (ret);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
@ -190,7 +187,7 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
/* mmap. */
if (!config_munmap && (ret = chunk_recycle(arena,
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
size, alignment, zero)) != NULL)
size, alignment, zero, true)) != NULL)
return (ret);
/*
* Requesting an address is not implemented for chunk_alloc_mmap(), so
@ -231,19 +228,18 @@ chunk_alloc_base(size_t size)
}
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
bool *zero)
chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool dalloc_node)
{
void *ret;
ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
if (ret == NULL)
return (NULL);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
return (ret);
return (chunk_recycle(arena, &arena->chunks_szad_cache,
&arena->chunks_ad_cache, true, new_addr, size, alignment, zero,
dalloc_node));
}
static arena_t *
@ -262,7 +258,27 @@ chunk_arena_get(unsigned arena_ind)
return (arena);
}
/* Default arena chunk allocation routine in the absence of user override. */
/*
 * Allocate a chunk through the core allocator using this arena's dss
 * precedence, annotating the new region for Valgrind on success.
 */
static void *
chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero)
{
	void *ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
	    arena->dss_prec);

	if (ret != NULL && config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}
/*
* Default arena chunk allocation routine in the absence of user override. This
* function isn't actually used by jemalloc, but it does the right thing if the
* application passes calls through to it during chunk allocation.
*/
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind)
@ -270,8 +286,21 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
arena_t *arena;
arena = chunk_arena_get(arena_ind);
return (chunk_alloc_core(arena, new_addr, size, alignment, zero,
arena->dss_prec));
return (chunk_alloc_arena(arena, new_addr, size, alignment, zero));
}
/*
 * Invoke the given chunk_alloc hook on behalf of arena.  Returns NULL on
 * failure.  On success, mark the returned region undefined for Valgrind
 * unless the default hook was used (chunk_alloc_arena() already
 * annotates in that case).
 */
void *
chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc, void *new_addr,
    size_t size, size_t alignment, bool *zero)
{
	void *ret;

	ret = chunk_alloc(new_addr, size, alignment, zero, arena->ind);
	if (ret == NULL)
		return (NULL);
	/*
	 * Fix: previously annotated "(chunk, chunksize)" -- "chunk" is not
	 * declared in this function (compile error in Valgrind-enabled
	 * builds) and chunksize is the wrong extent.  Annotate the actual
	 * returned region of the requested size.
	 */
	if (config_valgrind && chunk_alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}
void
@ -355,8 +384,8 @@ label_return:
malloc_mutex_unlock(&arena->chunks_mtx);
}
static void
chunk_cache(arena_t *arena, void *chunk, size_t size)
void
chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
{
assert(chunk != NULL);
@ -366,19 +395,11 @@ chunk_cache(arena_t *arena, void *chunk, size_t size)
chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
true, chunk, size, false);
}
/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{
chunk_cache(chunk_arena_get(arena_ind), chunk, size);
return (false);
arena_maybe_purge(arena);
}
void
chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed)
chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size, bool zeroed)
{
assert(chunk != NULL);
@ -395,6 +416,29 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed)
}
}
/*
* Default arena chunk deallocation routine in the absence of user override.
* This function isn't actually used by jemalloc, but it does the right thing if
* the application passes calls through to it during chunk deallocation.
*/
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{
/* Map arena_ind back to its arena and deallocate (zeroed=false). */
chunk_dalloc_arena(chunk_arena_get(arena_ind), chunk, size, false);
/* Always reports success (false) per the chunk_dalloc_t interface. */
return (false);
}
/*
 * Invoke the given chunk_dalloc hook on behalf of arena.  For
 * non-default hooks, additionally tell Valgrind the region is no longer
 * accessible; the default hook's path presumably performs its own
 * annotation -- TODO confirm against chunk_dalloc_arena().
 */
void
chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc, void *chunk,
size_t size)
{
/* NOTE(review): the hook's bool (failure) return value is ignored here. */
chunk_dalloc(chunk, size, arena->ind);
if (config_valgrind && chunk_dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{