Add extent serial numbers.

Add extent serial numbers and use them where appropriate as a sort key
that is higher priority than address, so that the allocation policy
prefers older extents.

This resolves #147.
commit 5c77af98b1
parent 45f83a2ac6
Author: Jason Evans
Date: 2016-11-14 18:27:23 -08:00

10 changed files with 299 additions and 164 deletions
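The heart of the change is the tree ordering: extents now sort by size, then by serial number, and only then by address, so that among equally sized candidates the allocator picks the oldest. Below is a minimal, self-contained sketch of such a size/sn/address comparator; the struct and helper names are simplified stand-ins for extent_node_t and its accessors, not the commit's actual code (see src/extent.c for that).

#include <stddef.h>
#include <stdint.h>

typedef struct {
    void *addr;   /* Base address of the extent. */
    size_t size;  /* Total region size. */
    size_t sn;    /* Serial number; lower means older. */
} extent_sketch_t;

static int
cmp_zu(size_t a, size_t b)
{
    /* Branchless three-way comparison: -1, 0, or 1. */
    return ((a > b) - (a < b));
}

static int
extent_szsnad_comp_sketch(const extent_sketch_t *a, const extent_sketch_t *b)
{
    int ret;

    ret = cmp_zu(a->size, b->size);
    if (ret != 0)
        return (ret);
    /* Equal sizes: prefer the older (smaller) serial number. */
    ret = cmp_zu(a->sn, b->sn);
    if (ret != 0)
        return (ret);
    /* Equal serial numbers: fall back on address for a stable order. */
    return (cmp_zu((uintptr_t)a->addr, (uintptr_t)b->addr));
}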

include/jemalloc/internal/arena.h

@@ -374,10 +374,12 @@ struct arena_s {
     dss_prec_t dss_prec;
     /* Extant arena chunks. */
     ql_head(extent_node_t) achunks;
+    /* Extent serial number generator state. */
+    size_t extent_sn_next;
     /*
      * In order to avoid rapid chunk allocation/deallocation when an arena
      * oscillates right on the cusp of needing a new chunk, cache the most
@@ -453,9 +455,9 @@ struct arena_s {
      * orderings are needed, which is why there are two trees with the same
      * contents.
      */
-    extent_tree_t chunks_szad_cached;
+    extent_tree_t chunks_szsnad_cached;
     extent_tree_t chunks_ad_cached;
-    extent_tree_t chunks_szad_retained;
+    extent_tree_t chunks_szsnad_retained;
     extent_tree_t chunks_ad_retained;
     malloc_mutex_t chunks_mtx;
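As the comment above notes, the two orderings serve different queries: the new szsnad trees answer "smallest fit, oldest first" during allocation, while the ad trees support address-ordered lookups (e.g. for coalescing). A hedged sketch of the allocation-side query, using the rb-tree nsearch generated for the new ordering; illustrative only, the committed lookup logic lives in src/chunk.c:

/*
 * A key node with sn 0 (the minimum) makes nsearch return the smallest
 * extent of at least "size" bytes, and among equal sizes the one with
 * the lowest (oldest) serial number.
 */
extent_node_t key;
extent_node_init(&key, arena, NULL, size, 0, false, false);
extent_node_t *node =
    extent_tree_szsnad_nsearch(&arena->chunks_szsnad_cached, &key);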
@@ -522,13 +524,13 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
 extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
 void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
 void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero);
+    size_t alignment, size_t *sn, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t usize);
+    size_t usize, size_t sn);
 void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
+    void *chunk, size_t oldsize, size_t usize, size_t sn);
 bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize, bool *zero);
 ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
@@ -601,6 +603,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
 void arena_boot(void);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
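The new extent_sn_next field and arena_extent_sn_next() declaration suggest a simple fetch-and-increment generator. A sketch under the assumption that jemalloc's atomic_add_z helper is used; the committed definition is in src/arena.c and may differ in detail:

size_t
arena_extent_sn_next(arena_t *arena)
{
    /*
     * atomic_add_z returns the post-add value; subtracting 1 yields the
     * serial number just consumed.
     */
    return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}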

include/jemalloc/internal/chunk.h

@@ -58,15 +58,16 @@ void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit, bool dalloc_node);
+    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
 void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+    size_t *sn, bool *zero, bool *commit);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
     bool committed);
+void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);
 bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);
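To make the new size_t *sn and size_t sn parameters concrete, here is a hypothetical caller, not taken from the commit: the serial number reported at allocation time must travel with the chunk and be handed back at deallocation so the recycling trees keep their age ordering.

size_t sn;
bool zero = false, commit = true;
void *chunk = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, NULL,
    chunksize, chunksize, &sn, &zero, &commit);
if (chunk != NULL) {
    /* ... use the chunk, remembering sn alongside it ... */
    chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, chunk, chunksize,
        sn, zero, commit);
}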

include/jemalloc/internal/extent.h

@@ -18,6 +18,20 @@ struct extent_node_s {
     /* Total region size. */
     size_t en_size;
+    /*
+     * Serial number (potentially non-unique).
+     *
+     * In principle serial numbers can wrap around on 32-bit systems if
+     * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+     * back on address comparison for equal serial numbers, stable (if
+     * imperfect) ordering is maintained.
+     *
+     * Serial numbers may not be unique even in the absence of wrap-around,
+     * e.g. when splitting an extent and assigning the same serial number to
+     * both resulting adjacent extents.
+     */
+    size_t en_sn;
     /*
      * The zeroed flag is used by chunk recycling code to track whether
      * memory is zero-filled.
@@ -45,8 +59,8 @@ struct extent_node_s {
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
@@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
@@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 arena_t *extent_node_arena_get(const extent_node_t *node);
 void *extent_node_addr_get(const extent_node_t *node);
 size_t extent_node_size_get(const extent_node_t *node);
+size_t extent_node_sn_get(const extent_node_t *node);
 bool extent_node_zeroed_get(const extent_node_t *node);
 bool extent_node_committed_get(const extent_node_t *node);
 bool extent_node_achunk_get(const extent_node_t *node);
@@ -80,12 +95,13 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
 void extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void extent_node_addr_set(extent_node_t *node, void *addr);
 void extent_node_size_set(extent_node_t *node, size_t size);
+void extent_node_sn_set(extent_node_t *node, size_t sn);
 void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void extent_node_committed_set(extent_node_t *node, bool committed);
 void extent_node_achunk_set(extent_node_t *node, bool achunk);
 void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, bool zeroed, bool committed);
+    size_t size, size_t sn, bool zeroed, bool committed);
 void extent_node_dirty_linkage_init(extent_node_t *node);
 void extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t *node)
     return (node->en_size);
 }
+JEMALLOC_INLINE size_t
+extent_node_sn_get(const extent_node_t *node)
+{
+    return (node->en_sn);
+}
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
@@ -164,6 +187,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
     node->en_size = size;
 }
+JEMALLOC_INLINE void
+extent_node_sn_set(extent_node_t *node, size_t sn)
+{
+    node->en_sn = sn;
+}
 JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
@@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    bool zeroed, bool committed)
+    size_t sn, bool zeroed, bool committed)
 {
     extent_node_arena_set(node, arena);
     extent_node_addr_set(node, addr);
     extent_node_size_set(node, size);
+    extent_node_sn_set(node, sn);
     extent_node_zeroed_set(node, zeroed);
     extent_node_committed_set(node, committed);
     extent_node_achunk_set(node, false);
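Putting the accessors together: a node is stamped with a serial number once, at initialization, and that number then drives its position in the szsnad trees. An illustrative sketch follows (the committed call sites are in src/arena.c and src/chunk.c); note that a split deliberately reuses the parent's sn for both halves, which is why the field is documented above as potentially non-unique.

/* Illustrative only: stamp a fresh node and insert it for recycling. */
size_t sn = arena_extent_sn_next(arena);
extent_node_init(node, arena, addr, size, sn, zeroed, committed);
extent_tree_szsnad_insert(&arena->chunks_szsnad_cached, node);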

include/jemalloc/internal/private_symbols.txt

@@ -36,6 +36,7 @@ arena_decay_time_get
 arena_decay_time_set
 arena_dss_prec_get
 arena_dss_prec_set
+arena_extent_sn_next
 arena_get
 arena_ichoose
 arena_init
@@ -218,6 +219,8 @@ extent_node_prof_tctx_get
 extent_node_prof_tctx_set
 extent_node_size_get
 extent_node_size_set
+extent_node_sn_get
+extent_node_sn_set
 extent_node_zeroed_get
 extent_node_zeroed_set
 extent_tree_ad_destroy
@@ -239,25 +242,25 @@ extent_tree_ad_reverse_iter
 extent_tree_ad_reverse_iter_recurse
 extent_tree_ad_reverse_iter_start
 extent_tree_ad_search
-extent_tree_szad_destroy
-extent_tree_szad_destroy_recurse
-extent_tree_szad_empty
-extent_tree_szad_first
-extent_tree_szad_insert
-extent_tree_szad_iter
-extent_tree_szad_iter_recurse
-extent_tree_szad_iter_start
-extent_tree_szad_last
-extent_tree_szad_new
-extent_tree_szad_next
-extent_tree_szad_nsearch
-extent_tree_szad_prev
-extent_tree_szad_psearch
-extent_tree_szad_remove
-extent_tree_szad_reverse_iter
-extent_tree_szad_reverse_iter_recurse
-extent_tree_szad_reverse_iter_start
-extent_tree_szad_search
+extent_tree_szsnad_destroy
+extent_tree_szsnad_destroy_recurse
+extent_tree_szsnad_empty
+extent_tree_szsnad_first
+extent_tree_szsnad_insert
+extent_tree_szsnad_iter
+extent_tree_szsnad_iter_recurse
+extent_tree_szsnad_iter_start
+extent_tree_szsnad_last
+extent_tree_szsnad_new
+extent_tree_szsnad_next
+extent_tree_szsnad_nsearch
+extent_tree_szsnad_prev
+extent_tree_szsnad_psearch
+extent_tree_szsnad_remove
+extent_tree_szsnad_reverse_iter
+extent_tree_szsnad_reverse_iter_recurse
+extent_tree_szsnad_reverse_iter_start
+extent_tree_szsnad_search
 ffs_llu
 ffs_lu
 ffs_u