Add extent serial numbers.

Add extent serial numbers and use them where appropriate as a sort key
that takes priority over address, so that the allocation policy prefers
older extents.

This resolves #147.
Jason Evans 2016-11-14 18:27:23 -08:00
parent 45f83a2ac6
commit 5c77af98b1
10 changed files with 299 additions and 164 deletions
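For orientation before the per-file diffs: the free-extent trees change their sort key from (quantized size, address) to (quantized size, serial number, address). Below is a minimal sketch of that composite ordering, assuming the accessor names this commit introduces; the authoritative comparators are in the src/extent.c and src/arena.c hunks further down.

/*
 * Sketch of the szsnad ordering: size preserves best fit, sn prefers
 * older extents within a size class, and address makes the order total.
 */
static int
szsnad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_qsize = extent_quantize(extent_node_size_get(a));
	size_t b_qsize = extent_quantize(extent_node_size_get(b));
	size_t a_sn = extent_node_sn_get(a);
	size_t b_sn = extent_node_sn_get(b);
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	if (a_qsize != b_qsize)
		return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
	if (a_sn != b_sn)
		return ((a_sn > b_sn) - (a_sn < b_sn));
	return ((a_addr > b_addr) - (a_addr < b_addr));
}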

include/jemalloc/internal/arena.h

@@ -374,10 +374,12 @@ struct arena_s {
dss_prec_t dss_prec;
/* Extant arena chunks. */
ql_head(extent_node_t) achunks;
/* Extent serial number generator state. */
size_t extent_sn_next;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -453,9 +455,9 @@ struct arena_s {
* orderings are needed, which is why there are two trees with the same
* contents.
*/
extent_tree_t chunks_szad_cached;
extent_tree_t chunks_szsnad_cached;
extent_tree_t chunks_ad_cached;
extent_tree_t chunks_szad_retained;
extent_tree_t chunks_szsnad_retained;
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
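Note: each cached/retained pair holds the same nodes under two orderings; the szsnad tree answers "best fit, oldest first" allocation queries, while the ad tree supports the address lookups needed for coalescing. A hedged sketch of the tandem bookkeeping this implies (mirroring what chunk_recycle()/chunk_record() do in src/chunk.c below):

/* A node is always inserted into, and removed from, both trees. */
extent_tree_szsnad_insert(&arena->chunks_szsnad_cached, node);
extent_tree_ad_insert(&arena->chunks_ad_cached, node);
/* ... */
extent_tree_szsnad_remove(&arena->chunks_szsnad_cached, node);
extent_tree_ad_remove(&arena->chunks_ad_cached, node);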
@@ -522,13 +524,13 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero);
size_t alignment, size_t *sn, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize);
size_t usize, size_t sn);
void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize);
void *chunk, size_t oldsize, size_t usize, size_t sn);
bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
@@ -601,6 +603,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
void arena_boot(void);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);

include/jemalloc/internal/chunk.h

@@ -58,15 +58,16 @@ void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, bool dalloc_node);
size_t *sn, bool *zero, bool *commit, bool dalloc_node);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
size_t *sn, bool *zero, bool *commit);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool committed);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
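The new size_t *sn out-parameter threads a serial number from each allocation to its matching deallocation. A hedged caller-side sketch of the protocol (the surrounding variables are illustrative, not from this commit):

size_t sn;
bool zero = false, commit = true;
void *chunk = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, NULL,
    chunksize, chunksize, &sn, &zero, &commit);
if (chunk != NULL) {
	/* ... use the chunk; its sn travels with it ... */
	chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, chunk, chunksize,
	    sn, zero, commit);
}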

include/jemalloc/internal/extent.h

@@ -18,6 +18,20 @@ struct extent_node_s {
/* Total region size. */
size_t en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
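A hedged illustration of the comment above: two nodes may legitimately share an sn, in which case the szsnad comparator's address fallback keeps the ordering stable (arena, addr_lo, and addr_hi are illustrative placeholders):

extent_node_t a, b;
/* e.g. the two halves of a split extent inherit one serial number. */
extent_node_init(&a, arena, addr_lo, chunksize, /* sn */ 5, true, true);
extent_node_init(&b, arena, addr_hi, chunksize, /* sn */ 5, true, true);
/*
 * The sn comparison yields 0, so ordering falls back to address; a
 * sorts before b exactly when addr_lo < addr_hi, even if serial
 * numbers collide or wrap around.
 */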
@@ -45,8 +59,8 @@ struct extent_node_s {
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
@@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
@@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
@@ -80,12 +95,13 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, bool zeroed, bool committed);
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t *node)
return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
@@ -164,6 +187,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
@@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
bool zeroed, bool committed)
size_t sn, bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);

include/jemalloc/internal/private_symbols.txt

@@ -36,6 +36,7 @@ arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_init
@@ -218,6 +219,8 @@ extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_destroy
@@ -239,25 +242,25 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_destroy
extent_tree_szad_destroy_recurse
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u

src/arena.c

@@ -38,8 +38,8 @@ static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
arena_bin_t *bin);
/******************************************************************************/
@@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
return (&chunk->node);
}
JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
size_t a_sn, b_sn;
assert(a != NULL);
assert(b != NULL);
a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
return ((a_sn > b_sn) - (a_sn < b_sn));
}
JEMALLOC_INLINE_C int
arena_ad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
JEMALLOC_INLINE_C int
arena_snad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
int ret;
assert(a != NULL);
assert(b != NULL);
ret = arena_sn_comp(a, b);
if (ret != 0)
return (ret);
ret = arena_ad_comp(a, b);
return (ret);
}
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
ph_link, arena_run_addr_comp)
ph_link, arena_snad_comp)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
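The comparators above all use the branchless (a > b) - (a < b) idiom, which yields -1/0/1 without the overflow hazard of returning a - b. A self-contained check of the semantics (not part of the diff):

#include <assert.h>
#include <stdint.h>

static int
u64_comp(uint64_t a, uint64_t b)
{
	/* -1 if a < b, 0 if equal, 1 if a > b. */
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(u64_comp(1, 2) == -1);
	assert(u64_comp(2, 2) == 0);
	assert(u64_comp(3, 2) == 1);
	return (0);
}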
@@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena)
static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
bool zero)
size_t sn, bool zero)
{
/*
@@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
}
@@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
NULL, chunksize, chunksize, zero, commit);
NULL, chunksize, chunksize, &sn, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
(void *)chunk, chunksize, *zero, *commit);
(void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL;
}
}
if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
*zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
chunksize, *zero, *commit);
chunksize, sn, *zero, *commit);
chunk = NULL;
}
@@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t sn;
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, zero, commit, true);
chunksize, &sn, zero, commit, true);
if (chunk != NULL) {
if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, true);
chunksize, sn, true);
return (NULL);
}
}
@@ -684,11 +727,13 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
size_t sn;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
sn = extent_node_sn_get(&chunk->node);
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
@@ -703,7 +748,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
}
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
committed);
sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
@@ -859,14 +904,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
size_t csize)
chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
bool *zero, size_t csize)
{
void *ret;
bool commit = true;
ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
alignment, zero, &commit);
alignment, sn, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +928,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero)
size_t alignment, size_t *sn, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +945,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, usize >> LG_PAGE);
ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
alignment, zero, &commit, true);
alignment, sn, zero, &commit, true);
malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
usize, alignment, zero, csize);
usize, alignment, sn, zero, csize);
}
return (ret);
}
void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
size_t sn)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
@@ -924,7 +970,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
}
arena_nactive_sub(arena, usize >> LG_PAGE);
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -948,7 +994,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t oldsize, size_t usize)
size_t oldsize, size_t usize, size_t sn)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1013,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
CHUNK_CEILING(usize));
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
true);
sn, true);
}
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -975,13 +1021,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
bool *zero, void *nchunk, size_t udiff, size_t cdiff)
size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
chunksize, zero, &commit) == NULL);
chunksize, sn, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1041,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
*zero, true);
*sn, *zero, true);
err = true;
}
return (err);
@@ -1010,6 +1056,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
size_t sn;
bool commit = true;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1069,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
chunksize, zero, &commit, true) == NULL);
chunksize, &sn, zero, &commit, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
&chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
cdiff);
&chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
udiff, cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
*zero, true);
sn, *zero, true);
err = true;
}
@@ -1519,6 +1566,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
size_t sn;
bool zero, commit;
UNUSED void *chunk;
@@ -1536,8 +1584,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
commit = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
&commit, false);
extent_node_size_get(chunkselm), chunksize, &sn,
&zero, &commit, false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1703,13 +1751,14 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
size, zeroed, committed);
size, sn, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -2315,7 +2364,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
arena_dalloc_bin_run(tsdn, arena, chunk, run,
bin);
} else
arena_bin_lower_run(arena, chunk, run, bin);
arena_bin_lower_run(arena, run, bin);
}
return (ret);
}
@@ -2820,16 +2869,18 @@ arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
{
/*
* Make sure that if bin->runcur is non-NULL, it refers to the lowest
* non-full run. It is okay to NULL runcur out rather than proactively
* keeping it pointing at the lowest non-full run.
* Make sure that if bin->runcur is non-NULL, it refers to the
* oldest/lowest non-full run. It is okay to NULL runcur out rather
* than proactively keeping it pointing at the oldest/lowest non-full
* run.
*/
if ((uintptr_t)run < (uintptr_t)bin->runcur) {
if (bin->runcur != NULL &&
arena_snad_comp(arena_run_to_miscelm(bin->runcur),
arena_run_to_miscelm(run)) > 0) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
@@ -2865,7 +2916,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
arena_bin_lower_run(arena, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
@@ -3452,6 +3503,13 @@ arena_nthreads_dec(arena_t *arena, bool internal)
atomic_sub_u(&arena->nthreads[internal], 1);
}
size_t
arena_extent_sn_next(arena_t *arena)
{
return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}
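jemalloc's atomic_add_z() returns the post-add value, hence the - 1 to recover the pre-increment serial. The same fetch-and-increment pattern in standard C11 atomics, as a standalone sketch (not jemalloc's atomic shim):

#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t sn_next;

/* Each call returns a distinct, monotonically increasing serial. */
static size_t
sn_alloc(void)
{
	/* atomic_fetch_add returns the value prior to the addition. */
	return (atomic_fetch_add(&sn_next, 1));
}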
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
@@ -3511,6 +3569,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
ql_new(&arena->achunks);
arena->extent_sn_next = 0;
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3532,9 +3592,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
WITNESS_RANK_ARENA_HUGE))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))

src/base.c

@@ -5,7 +5,8 @@
/* Data. */
static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static size_t base_extent_sn_next;
static extent_tree_t base_avail_szsnad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
@@ -39,6 +40,14 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
base_nodes = node;
}
static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
extent_node_init(node, NULL, addr, size, sn, true, true);
}
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
@@ -68,7 +77,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
extent_node_init(node, NULL, addr, csize, true, true);
base_extent_node_init(node, addr, csize);
return (node);
}
@@ -92,12 +101,12 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
extent_node_init(&key, NULL, NULL, usize, 0, false, false);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
if (node != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
extent_tree_szsnad_remove(&base_avail_szsnad, node);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(tsdn, csize);
@@ -111,7 +120,7 @@ base_alloc(tsdn_t *tsdn, size_t size)
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
extent_tree_szsnad_insert(&base_avail_szsnad, node);
} else
base_node_dalloc(tsdn, node);
if (config_stats) {
@@ -149,7 +158,8 @@ base_boot(void)
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_extent_sn_next = 0;
extent_tree_szsnad_new(&base_avail_szsnad);
base_nodes = NULL;
return (false);

src/chunk.c

@@ -50,9 +50,9 @@ const chunk_hooks_t chunk_hooks_default = {
*/
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
bool committed);
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
/******************************************************************************/
@@ -183,26 +183,25 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
/*
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* fits.
* Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
* best fits.
*/
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
extent_node_init(&key, arena, NULL, size, 0, false, false);
return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}
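Because the search key carries sn 0 and a NULL address, extent_tree_szsnad_nsearch() returns the least node that is >= the key: among extents of the smallest adequate quantized size, the lowest sn (i.e. the oldest extent) wins, with address as the final tiebreak. A hedged worked example with made-up values:

/*
 * Tree contents, in (quantized size, sn, address) order:
 *   (2 MiB, sn 7, 0x7f0000000000)   <- oldest adequate extent
 *   (2 MiB, sn 9, 0x7e0000000000)
 *   (4 MiB, sn 3, 0x7d0000000000)
 * nsearch(key = (2 MiB, sn 0, NULL)) returns the (2 MiB, sn 7) node,
 * so recycling reuses the oldest extent that best fits.
 */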
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit, bool dalloc_node)
{
void *ret;
extent_node_t *node;
@@ -228,12 +227,11 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
@@ -246,6 +244,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
*sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
@@ -260,13 +259,13 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
@@ -278,9 +277,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize, zeroed,
committed);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
chunks_ad, cache, ret, size + trailsize, *sn,
zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
@@ -289,22 +288,22 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, chunks_ad, cache, ret, size +
trailsize, zeroed, committed);
chunks_szsnad, chunks_ad, cache, ret, size
+ trailsize, *sn, zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
trailsize, *sn, zeroed, committed);
extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size, zeroed, committed);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
cache, ret, size, *sn, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -388,8 +387,8 @@ chunk_alloc_base(size_t size)
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit, bool dalloc_node)
{
void *ret;
@@ -399,8 +398,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, zero, commit, dalloc_node);
&arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, sn, zero, commit, dalloc_node);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -454,7 +453,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
@@ -464,8 +464,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true);
&arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, sn, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
@@ -475,14 +475,15 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
alignment, sn, zero, commit);
if (ret == NULL) {
if (chunk_hooks->alloc == chunk_alloc_default) {
/* Call directly to propagate tsdn. */
@@ -496,6 +497,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (ret == NULL)
return (NULL);
*sn = arena_extent_sn_next(arena);
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
@@ -506,8 +509,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
@@ -519,7 +522,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
@@ -531,15 +534,17 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
* remove/insert from/into chunks_szsnad.
*/
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
if (sn < extent_node_sn_get(node))
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
@@ -557,10 +562,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
goto label_return;
}
extent_node_init(node, arena, chunk, size, !unzeroed,
extent_node_init(node, arena, chunk, size, sn, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
@@ -574,19 +579,21 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
* remove/insert node from/into chunks_szsnad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_szsnad_remove(chunks_szsnad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
if (extent_node_sn_get(prev) < extent_node_sn_get(node))
extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(tsdn, arena, prev);
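Both coalescing paths above keep the minimum of the merged serial numbers, so a coalesced extent sorts as old as its oldest constituent; merging can only make an extent look older, never younger. The rule, as a one-liner (sketch):

/* The sn of a coalesced extent is the min of its parts' sns. */
static size_t
coalesced_sn(size_t a_sn, size_t b_sn)
{
	return ((a_sn < b_sn) ? a_sn : b_sn);
}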
@@ -598,7 +605,7 @@ label_return:
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed)
void *chunk, size_t size, size_t sn, bool committed)
{
assert(chunk != NULL);
@@ -606,8 +613,9 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
&arena->chunks_ad_cached, true, chunk, size, sn, false,
committed);
arena_maybe_purge(tsdn, arena);
}
@@ -630,7 +638,7 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool err;
@@ -656,8 +664,9 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
&arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
committed);
if (config_stats)
arena->stats.retained += size;

src/chunk_dss.c

@@ -162,7 +162,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
false, true);
arena_extent_sn_next(arena), false,
true);
}
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(

src/extent.c

@@ -26,30 +26,22 @@ extent_quantize(size_t size)
}
JEMALLOC_INLINE_C int
extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int
extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
{
size_t a_sn = extent_node_sn_get(a);
size_t b_sn = extent_node_sn_get(b);
return ((a_sn > b_sn) - (a_sn < b_sn));
}
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
@@ -60,5 +52,26 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr));
}
JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
ret = extent_sz_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_sn_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_ad_comp(a, b);
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
extent_szsnad_comp)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
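Because sizes are quantized before comparison, "equally useful" extents collapse into one size class, and the sn/address keys decide among them. A hedged illustration with invented numbers (the real mapping is whatever extent_quantize() computes):

/*
 * Suppose extent_quantize() maps both 1280 KiB and 1288 KiB to the
 * same quantized size.  For
 *   a = (size 1280 KiB, sn 4, addr 0x10...)
 *   b = (size 1288 KiB, sn 2, addr 0x20...)
 * extent_szsnad_comp(a, b) > 0: the sizes tie after quantization, and
 * b's lower sn makes it sort first, so first-best-fit reuses b.
 */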

src/huge.c

@@ -56,6 +56,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
size_t ausize;
arena_t *iarena;
extent_node_t *node;
size_t sn;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -68,7 +69,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get();
iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
a0get();
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, iarena);
if (node == NULL)
@@ -82,15 +84,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
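huge_palloc() shows a serial number's full life cycle: the chunk allocator produces it, the extent node records it, and later shrink/dalloc paths read it back out of the node. A condensed, hedged trace assembled from the hunks in this file:

size_t sn;
void *ret = arena_chunk_alloc_huge(tsdn, arena, usize, alignment, &sn,
    &is_zeroed);
extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
/* ... much later, on deallocation ... */
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
    extent_node_addr_get(node), extent_node_size_get(node),
    extent_node_sn_get(node));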
@@ -245,7 +247,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
extent_node_sn_get(node));
return (false);
}
@@ -407,7 +410,8 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
extent_node_addr_get(node), extent_node_size_get(node),
extent_node_sn_get(node));
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsdn, arena);