Fix chunk cache races.

These regressions were introduced by ee41ad409a (Integrate whole chunks
into unused dirty page purging machinery.).
Author: Jason Evans
Date:   2015-02-18 16:40:53 -08:00
parent 738e089a2e
commit 99bd94fb65
5 changed files with 263 additions and 139 deletions
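
Before this commit, the arena code captured arena->chunk_alloc / arena->chunk_dalloc, dropped arena->lock, and only then invoked the hooks, which by default operate on the per-arena chunk cache; that unlocked window is what let other threads race against the cache. The fix applies one pattern throughout: cache operations (chunk_alloc_cache() / chunk_dalloc_cache()) now run while arena->lock is held, and the lock is dropped only around non-default, user-supplied hooks, which go through chunk_alloc_wrapper() / chunk_dalloc_wrapper() or the new *_hard helpers. A condensed sketch of the deallocation side, modeled on arena_chunk_dalloc_huge() as changed below (illustrative only; stats and nactive updates elided):

	malloc_mutex_lock(&arena->lock);
	chunk_dalloc = arena->chunk_dalloc;
	if (likely(chunk_dalloc == chunk_dalloc_default)) {
		/* Default hook: do the cache work under arena->lock. */
		chunk_dalloc_cache(arena, chunk, csize);
		malloc_mutex_unlock(&arena->lock);
	} else {
		/* User hook: drop the lock before calling out. */
		malloc_mutex_unlock(&arena->lock);
		chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize);
	}

Because chunk_dalloc_cache() also calls arena_maybe_purge() while the lock is still held, arena_maybe_purge() is promoted from a file-local inline to an extern function declared in arena.h.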

--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h

@@ -399,6 +399,7 @@ void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
     size_t oldsize, size_t usize);
 bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
     size_t oldsize, size_t usize, bool *zero);
+void arena_maybe_purge(arena_t *arena);
 void arena_purge_all(arena_t *arena);
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
     index_t binind, uint64_t prof_accumbytes);

--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h

@@ -39,16 +39,21 @@ extern size_t chunk_npages;
 bool chunk_register(const void *chunk, const extent_node_t *node);
 void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
-void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
-    chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
-    size_t size, size_t alignment, bool *zero);
+void *chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool dalloc_node);
 void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc,
+    void *new_addr, size_t size, size_t alignment, bool *zero);
 void chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
     bool zeroed);
+void chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size);
+void chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size,
+    bool zeroed);
 bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc,
+    void *chunk, size_t size);
 bool chunk_boot(void);
 void chunk_prefork(void);
 void chunk_postfork_parent(void);

--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt

@@ -53,6 +53,7 @@ arena_mapbitsp_read
 arena_mapbitsp_write
 arena_maxclass
 arena_maxrun
+arena_maybe_purge
 arena_metadata_allocated_add
 arena_metadata_allocated_get
 arena_metadata_allocated_sub
@@ -124,14 +125,18 @@ bootstrap_free
 bootstrap_malloc
 bt_init
 buferror
-chunk_alloc_arena
 chunk_alloc_base
+chunk_alloc_cache
 chunk_alloc_default
 chunk_alloc_dss
 chunk_alloc_mmap
+chunk_alloc_wrapper
 chunk_boot
+chunk_dalloc_arena
+chunk_dalloc_cache
 chunk_dalloc_default
 chunk_dalloc_mmap
+chunk_dalloc_wrapper
 chunk_deregister
 chunk_dss_boot
 chunk_dss_postfork_child
@@ -147,7 +152,6 @@ chunk_postfork_parent
 chunk_prefork
 chunk_record
 chunk_register
-chunk_unmap
 chunks_rtree
 chunksize
 chunksize_mask

--- a/src/arena.c
+++ b/src/arena.c

@@ -20,7 +20,6 @@ unsigned nhclasses; /* Number of huge size classes. */
  * definition.
  */
 
-static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
 static void arena_purge(arena_t *arena, bool all);
 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
     bool cleaned);
@@ -427,27 +426,53 @@ arena_chunk_init_spare(arena_t *arena)
 	return (chunk);
 }
 
+static bool
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+{
+
+	extent_node_init(&chunk->node, arena, chunk, chunksize, zero);
+	extent_node_achunk_set(&chunk->node, true);
+	return (chunk_register(chunk, &chunk->node));
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal_hard(arena_t *arena, bool *zero)
+{
+	arena_chunk_t *chunk;
+	chunk_alloc_t *chunk_alloc = arena->chunk_alloc;
+	chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
+
+	malloc_mutex_unlock(&arena->lock);
+	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_alloc, NULL,
+	    chunksize, chunksize, zero);
+	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+		chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)chunk,
+		    chunksize);
+		chunk = NULL;
+	}
+	malloc_mutex_lock(&arena->lock);
+
+	return (chunk);
+}
+
 static arena_chunk_t *
 arena_chunk_alloc_internal(arena_t *arena, bool *zero)
 {
 	arena_chunk_t *chunk;
-	chunk_alloc_t *chunk_alloc;
-	chunk_dalloc_t *chunk_dalloc;
 
-	chunk_alloc = arena->chunk_alloc;
-	chunk_dalloc = arena->chunk_dalloc;
-	malloc_mutex_unlock(&arena->lock);
-	chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
-	    arena->ind, NULL, chunksize, chunksize, zero);
-	if (chunk != NULL) {
-		extent_node_init(&chunk->node, arena, chunk, chunksize, *zero);
-		extent_node_achunk_set(&chunk->node, true);
-		if (chunk_register(chunk, &chunk->node)) {
-			chunk_dalloc((void *)chunk, chunksize, arena->ind);
-			chunk = NULL;
-		}
-	}
-	malloc_mutex_lock(&arena->lock);
+	if (likely(arena->chunk_alloc == chunk_alloc_default)) {
+		chunk = chunk_alloc_cache(arena, NULL, chunksize, chunksize,
+		    zero, true);
+		if (chunk != NULL && arena_chunk_register(arena, chunk,
+		    *zero)) {
+			chunk_dalloc_cache(arena, chunk, chunksize);
+			return (NULL);
+		}
+	} else
+		chunk = NULL;
+	if (chunk == NULL)
+		chunk = arena_chunk_alloc_internal_hard(arena, zero);
+
 	if (config_stats && chunk != NULL) {
 		arena->stats.mapped += chunksize;
 		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
@@ -553,11 +578,19 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 			arena_run_dirty_remove(arena, spare, map_bias,
 			    chunk_npages-map_bias);
 		}
-		chunk_dalloc = arena->chunk_dalloc;
-		malloc_mutex_unlock(&arena->lock);
+
 		chunk_deregister(spare, &spare->node);
-		chunk_dalloc((void *)spare, chunksize, arena->ind);
-		malloc_mutex_lock(&arena->lock);
+
+		chunk_dalloc = arena->chunk_dalloc;
+		if (likely(chunk_dalloc == chunk_dalloc_default))
+			chunk_dalloc_cache(arena, (void *)spare, chunksize);
+		else {
+			malloc_mutex_unlock(&arena->lock);
+			chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)spare,
+			    chunksize);
+			malloc_mutex_lock(&arena->lock);
+		}
+
 		if (config_stats) {
 			arena->stats.mapped -= chunksize;
 			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
@@ -661,28 +694,14 @@ arena_node_dalloc(arena_t *arena, extent_node_t *node)
 	malloc_mutex_unlock(&arena->node_cache_mtx);
 }
 
-void *
-arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
-    bool *zero)
+static void *
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
+    size_t usize, size_t alignment, bool *zero, size_t csize)
 {
 	void *ret;
-	chunk_alloc_t *chunk_alloc;
-	chunk_dalloc_t *chunk_dalloc;
-	size_t csize = CHUNK_CEILING(usize);
 
-	malloc_mutex_lock(&arena->lock);
-	chunk_alloc = arena->chunk_alloc;
-	chunk_dalloc = arena->chunk_dalloc;
-	if (config_stats) {
-		/* Optimistically update stats prior to unlocking. */
-		arena_huge_malloc_stats_update(arena, usize);
-		arena->stats.mapped += usize;
-	}
-	arena->nactive += (usize >> LG_PAGE);
-	malloc_mutex_unlock(&arena->lock);
-
-	ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
-	    csize, alignment, zero);
+	ret = chunk_alloc_wrapper(arena, chunk_alloc, NULL, csize, alignment,
+	    zero);
 	if (ret == NULL) {
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(&arena->lock);
@@ -692,12 +711,42 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
 		}
 		arena->nactive -= (usize >> LG_PAGE);
 		malloc_mutex_unlock(&arena->lock);
-		return (NULL);
 	}
 
-	if (config_stats)
-		stats_cactive_add(usize);
+	return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+    bool *zero)
+{
+	void *ret;
+	chunk_alloc_t *chunk_alloc;
+	size_t csize = CHUNK_CEILING(usize);
+
+	malloc_mutex_lock(&arena->lock);
+
+	/* Optimistically update stats. */
+	if (config_stats) {
+		arena_huge_malloc_stats_update(arena, usize);
+		arena->stats.mapped += usize;
+	}
+	arena->nactive += (usize >> LG_PAGE);
+
+	chunk_alloc = arena->chunk_alloc;
+	if (likely(chunk_alloc == chunk_alloc_default)) {
+		ret = chunk_alloc_cache(arena, NULL, csize, alignment, zero,
+		    true);
+	} else
+		ret = NULL;
+	malloc_mutex_unlock(&arena->lock);
+	if (ret == NULL) {
+		ret = arena_chunk_alloc_huge_hard(arena, chunk_alloc, usize,
+		    alignment, zero, csize);
+	}
+
+	if (config_stats && ret != NULL)
+		stats_cactive_add(usize);
 
 	return (ret);
 }
@@ -705,7 +754,9 @@ void
 arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
 {
 	chunk_dalloc_t *chunk_dalloc;
+	size_t csize;
 
+	csize = CHUNK_CEILING(usize);
 	malloc_mutex_lock(&arena->lock);
 	chunk_dalloc = arena->chunk_dalloc;
 	if (config_stats) {
@@ -714,8 +765,14 @@ arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
 		stats_cactive_sub(usize);
 	}
 	arena->nactive -= (usize >> LG_PAGE);
-	malloc_mutex_unlock(&arena->lock);
-	chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
+
+	if (likely(chunk_dalloc == chunk_dalloc_default)) {
+		chunk_dalloc_cache(arena, chunk, csize);
+		malloc_mutex_unlock(&arena->lock);
+	} else {
+		malloc_mutex_unlock(&arena->lock);
+		chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize);
+	}
 }
 
 void
@@ -747,12 +804,10 @@ void
 arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
     size_t usize)
 {
-	chunk_dalloc_t *chunk_dalloc;
 	size_t udiff = oldsize - usize;
 	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
 
 	malloc_mutex_lock(&arena->lock);
-	chunk_dalloc = arena->chunk_dalloc;
 	if (config_stats) {
 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
 		if (cdiff != 0) {
@@ -761,52 +816,81 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
 		}
 	}
 	arena->nactive -= udiff >> LG_PAGE;
-	malloc_mutex_unlock(&arena->lock);
+
 	if (cdiff != 0) {
-		chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)),
-		    cdiff, arena->ind);
+		chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
+		void *nchunk = (void *)((uintptr_t)chunk +
+		    CHUNK_CEILING(usize));
+
+		if (likely(chunk_dalloc == chunk_dalloc_default)) {
+			chunk_dalloc_cache(arena, nchunk, cdiff);
+			malloc_mutex_unlock(&arena->lock);
+		} else {
+			malloc_mutex_unlock(&arena->lock);
+			chunk_dalloc_wrapper(arena, chunk_dalloc, nchunk,
+			    cdiff);
+		}
+	} else
+		malloc_mutex_unlock(&arena->lock);
+}
+
+bool
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
+    size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff,
+    size_t cdiff)
+{
+	bool err;
+
+	err = (chunk_alloc_wrapper(arena, chunk_alloc, nchunk, cdiff, chunksize,
+	    zero) == NULL);
+	if (err) {
+		/* Revert optimistic stats updates. */
+		malloc_mutex_lock(&arena->lock);
+		if (config_stats) {
+			arena_huge_ralloc_stats_update_undo(arena, oldsize,
+			    usize);
+			arena->stats.mapped -= cdiff;
+		}
+		arena->nactive -= (udiff >> LG_PAGE);
+		malloc_mutex_unlock(&arena->lock);
 	}
+
+	return (err);
 }
 
 bool
 arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
     size_t usize, bool *zero)
 {
+	bool err;
 	chunk_alloc_t *chunk_alloc;
-	chunk_dalloc_t *chunk_dalloc;
+	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 	size_t udiff = usize - oldsize;
 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
 
 	malloc_mutex_lock(&arena->lock);
-	chunk_alloc = arena->chunk_alloc;
-	chunk_dalloc = arena->chunk_dalloc;
+
+	/* Optimistically update stats. */
 	if (config_stats) {
-		/* Optimistically update stats prior to unlocking. */
 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
 		arena->stats.mapped += cdiff;
 	}
 	arena->nactive += (udiff >> LG_PAGE);
-	malloc_mutex_unlock(&arena->lock);
 
-	if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
-	    (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff,
-	    chunksize, zero) == NULL) {
-		/* Revert optimistic stats updates. */
-		malloc_mutex_lock(&arena->lock);
-		if (config_stats) {
-			arena_huge_ralloc_stats_update_undo(arena,
-			    oldsize, usize);
-			arena->stats.mapped -= cdiff;
-		}
-		arena->nactive -= (udiff >> LG_PAGE);
-		malloc_mutex_unlock(&arena->lock);
-		return (true);
+	chunk_alloc = arena->chunk_alloc;
+	if (likely(chunk_alloc == chunk_alloc_default)) {
+		err = (chunk_alloc_cache(arena, nchunk, cdiff, chunksize, zero,
+		    true) == NULL);
+	} else
+		err = true;
+	malloc_mutex_unlock(&arena->lock);
+	if (err) {
+		err = arena_chunk_ralloc_huge_expand_hard(arena, chunk_alloc,
+		    oldsize, usize, zero, nchunk, udiff, cdiff);
 	}
 
-	if (config_stats)
+	if (config_stats && !err)
 		stats_cactive_add(udiff);
-
-	return (false);
+	return (err);
 }
 
 static arena_run_t *
@@ -909,7 +993,7 @@ arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
 	return (arena_run_alloc_small_helper(arena, size, binind));
 }
 
-JEMALLOC_INLINE_C void
+void
 arena_maybe_purge(arena_t *arena)
 {
 	size_t threshold;
@@ -999,39 +1083,25 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 		runselm_next = qr_next(runselm, rd_link);
 		if (runselm == &chunkselm->runs_dirty) {
-			extent_node_t *chunkselm_next, *tnode;
-			void *addr;
-			size_t size;
-			bool zeroed, zero;
+			extent_node_t *chunkselm_next;
+			bool zero;
 			UNUSED void *chunk;
 
 			chunkselm_next = qr_next(chunkselm, cc_link);
 
 			/*
-			 * Cache contents of chunkselm prior to it being
-			 * destroyed as a side effect of allocating the chunk.
+			 * Allocate.  chunkselm remains valid due to the
+			 * dalloc_node=false argument to chunk_alloc_cache().
 			 */
-			addr = extent_node_addr_get(chunkselm);
-			size = extent_node_size_get(chunkselm);
-			zeroed = extent_node_zeroed_get(chunkselm);
-			/* Allocate. */
 			zero = false;
-			chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
-			    arena->ind);
-			assert(chunk == addr);
-			assert(zero == zeroed);
-			/*
-			 * Create a temporary node to link into the ring of
-			 * stashed allocations.  OOM shouldn't be possible
-			 * because chunk allocation just cached a node.
-			 */
-			tnode = arena_node_alloc(arena);
-			assert(tnode != NULL);
-			/* Stash. */
-			extent_node_init(tnode, arena, addr, size, zeroed);
-			extent_node_dirty_linkage_init(tnode);
-			extent_node_dirty_insert(tnode, purge_runs_sentinel,
+			chunk = chunk_alloc_cache(arena,
+			    extent_node_addr_get(chunkselm),
+			    extent_node_size_get(chunkselm), chunksize, &zero,
+			    false);
+			assert(chunk == extent_node_addr_get(chunkselm));
+			assert(zero == extent_node_zeroed_get(chunkselm));
+			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
 			    purge_chunks_sentinel);
-			npages = size >> LG_PAGE;
+			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
 			chunkselm = chunkselm_next;
 		} else {
 			arena_chunk_t *chunk =
@@ -1170,7 +1240,7 @@ arena_unstash_purged(arena_t *arena,
 			extent_node_dirty_remove(chunkselm);
 			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
-			chunk_unmap(arena, addr, size, zeroed);
+			chunk_dalloc_arena(arena, addr, size, zeroed);
 		} else {
 			arena_run_t *run = &runselm->run;
 			qr_remove(runselm, rd_link);

--- a/src/chunk.c
+++ b/src/chunk.c

@@ -65,7 +65,7 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
 static void *
 chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
-    size_t alignment, bool *zero)
+    size_t alignment, bool *zero, bool dalloc_node)
 {
 	void *ret;
 	extent_node_t *node;
@@ -74,6 +74,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
 	bool zeroed;
 
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(dalloc_node || new_addr != NULL);
 
 	alloc_size = size + alignment - chunksize;
 	/* Beware size_t wrap-around. */
@@ -129,7 +130,8 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
 	}
 	malloc_mutex_unlock(&arena->chunks_mtx);
 
-	if (node != NULL)
+	assert(!dalloc_node || node != NULL);
+	if (dalloc_node && node != NULL)
 		arena_node_dalloc(arena, node);
 	if (*zero) {
 		if (!zeroed)
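
The dalloc_node parameter added to chunk_recycle() / chunk_alloc_cache() exists for the arena_stash_dirty() caller in src/arena.c above: with dalloc_node=false the recycled extent's node is not returned to the arena's node cache, so the caller can keep using it (chunkselm stays valid and is stashed directly on the purge list). The two assertions capture that contract; restated with comments (illustrative, not additional committed code):

	/* Exact-address reuse is the only dalloc_node=false use case. */
	assert(dalloc_node || new_addr != NULL);
	/* ... */
	/* A recycled cached extent always has a node; free it only if asked. */
	assert(!dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);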
@@ -153,8 +155,8 @@ chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
 	void *ret;
 
 	if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
-	    &arena->chunks_ad_dss, false, new_addr, size, alignment, zero)) !=
-	    NULL)
+	    &arena->chunks_ad_dss, false, new_addr, size, alignment, zero,
+	    true)) != NULL)
 		return (ret);
 	ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
 	return (ret);
@@ -177,11 +179,6 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	/* cache. */
-	if ((ret = chunk_recycle(arena, &arena->chunks_szad_cache,
-	    &arena->chunks_ad_cache, true, new_addr, size, alignment, zero)) !=
-	    NULL)
-		return (ret);
 	/* "primary" dss. */
 	if (have_dss && dss_prec == dss_prec_primary && (ret =
 	    chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
@@ -190,7 +187,7 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 	/* mmap. */
 	if (!config_munmap && (ret = chunk_recycle(arena,
 	    &arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
-	    size, alignment, zero)) != NULL)
+	    size, alignment, zero, true)) != NULL)
 		return (ret);
 	/*
 	 * Requesting an address is not implemented for chunk_alloc_mmap(), so
@@ -231,19 +228,18 @@ chunk_alloc_base(size_t size)
 }
 
 void *
-chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
-    unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
-    bool *zero)
+chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool dalloc_node)
 {
-	void *ret;
 
-	ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
-	if (ret == NULL)
-		return (NULL);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+	assert(alignment != 0);
+	assert((alignment & chunksize_mask) == 0);
 
-	return (ret);
+	return (chunk_recycle(arena, &arena->chunks_szad_cache,
+	    &arena->chunks_ad_cache, true, new_addr, size, alignment, zero,
+	    dalloc_node));
 }
 
 static arena_t *
@@ -262,7 +258,27 @@ chunk_arena_get(unsigned arena_ind)
 	return (arena);
 }
 
-/* Default arena chunk allocation routine in the absence of user override. */
+static void *
+chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero)
+{
+	void *ret;
+
+	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+	    arena->dss_prec);
+	if (ret == NULL)
+		return (NULL);
+	if (config_valgrind)
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+	return (ret);
+}
+
+/*
+ * Default arena chunk allocation routine in the absence of user override.  This
+ * function isn't actually used by jemalloc, but it does the right thing if the
+ * application passes calls through to it during chunk allocation.
+ */
 void *
 chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
     unsigned arena_ind)
@@ -270,8 +286,21 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	arena_t *arena;
 
 	arena = chunk_arena_get(arena_ind);
-	return (chunk_alloc_core(arena, new_addr, size, alignment, zero,
-	    arena->dss_prec));
+	return (chunk_alloc_arena(arena, new_addr, size, alignment, zero));
+}
+
+void *
+chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc, void *new_addr,
+    size_t size, size_t alignment, bool *zero)
+{
+	void *ret;
+
+	ret = chunk_alloc(new_addr, size, alignment, zero, arena->ind);
+	if (ret == NULL)
+		return (NULL);
+	if (config_valgrind && chunk_alloc != chunk_alloc_default)
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+
+	return (ret);
 }
 
 void
@@ -355,8 +384,8 @@ label_return:
 	malloc_mutex_unlock(&arena->chunks_mtx);
 }
 
-static void
-chunk_cache(arena_t *arena, void *chunk, size_t size)
+void
+chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
 {
 
 	assert(chunk != NULL);
@@ -366,19 +395,11 @@ chunk_cache(arena_t *arena, void *chunk, size_t size)
 
 	chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
 	    true, chunk, size, false);
-}
-
-/* Default arena chunk deallocation routine in the absence of user override. */
-bool
-chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
-{
-
-	chunk_cache(chunk_arena_get(arena_ind), chunk, size);
-	return (false);
+	arena_maybe_purge(arena);
 }
 
 void
-chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed)
+chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size, bool zeroed)
 {
 
 	assert(chunk != NULL);
@@ -395,6 +416,29 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed)
 	}
 }
 
+/*
+ * Default arena chunk deallocation routine in the absence of user override.
+ * This function isn't actually used by jemalloc, but it does the right thing if
+ * the application passes calls through to it during chunk deallocation.
+ */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+	chunk_dalloc_arena(chunk_arena_get(arena_ind), chunk, size, false);
+	return (false);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc, void *chunk,
+    size_t size)
+{
+
+	chunk_dalloc(chunk, size, arena->ind);
+	if (config_valgrind && chunk_dalloc != chunk_dalloc_default)
+		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+}
+
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
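
The expanded comments on chunk_alloc_default() / chunk_dalloc_default() describe applications that install their own chunk hooks and then defer to the previous (default) ones. A hedged sketch of that usage from the application side; the hook names, the saved prev_* pointers, and the logging are illustrative assumptions, and how the hooks are registered and the previous pointers captured is omitted:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Function types mirroring the hook signatures visible in this commit. */
	typedef void *(chunk_alloc_hook_t)(void *new_addr, size_t size,
	    size_t alignment, bool *zero, unsigned arena_ind);
	typedef bool (chunk_dalloc_hook_t)(void *chunk, size_t size,
	    unsigned arena_ind);

	/* Saved when the custom hooks were installed (assumed done elsewhere). */
	static chunk_alloc_hook_t *prev_chunk_alloc;
	static chunk_dalloc_hook_t *prev_chunk_dalloc;

	static void *
	my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
	    unsigned arena_ind)
	{
		fprintf(stderr, "chunk alloc: %zu bytes (arena %u)\n", size,
		    arena_ind);
		/* Pass the call through to the previous/default hook. */
		return (prev_chunk_alloc(new_addr, size, alignment, zero,
		    arena_ind));
	}

	static bool
	my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
	{
		fprintf(stderr, "chunk dalloc: %p, %zu bytes (arena %u)\n",
		    chunk, size, arena_ind);
		return (prev_chunk_dalloc(chunk, size, arena_ind));
	}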