Add extent serial numbers.
Add extent serial numbers and use them where appropriate as a sort key that is higher priority than address, so that the allocation policy prefers older extents. This resolves #147.
commit a38acf716e
parent c0a667112c
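The key mechanism is the new serial-number/address ordering: the serial number is the primary sort key (lower means older, so older extents sort first), and address breaks ties. As a quick orientation, here is a minimal standalone sketch of that ordering; the toy_* names and struct are illustrative stand-ins for this summary only, not jemalloc definitions (the real comparators are extent_sn_comp, extent_ad_comp, and extent_snad_comp in the diff below).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for an extent: only the fields the comparator needs. */
typedef struct {
	size_t	sn;	/* Serial number: lower means older. */
	void	*addr;	/* Base address: tie-breaker for equal serial numbers. */
} toy_extent_t;

/* Order by serial number first, then by address, mirroring extent_snad_comp(). */
static int
toy_snad_comp(const toy_extent_t *a, const toy_extent_t *b)
{
	int ret = (a->sn > b->sn) - (a->sn < b->sn);
	if (ret != 0)
		return (ret);
	return (((uintptr_t)a->addr > (uintptr_t)b->addr) -
	    ((uintptr_t)a->addr < (uintptr_t)b->addr));
}

int
main(void)
{
	char buf[2];
	/* The older extent sorts first even though its address is higher. */
	toy_extent_t older = {0, &buf[1]};
	toy_extent_t newer = {1, &buf[0]};

	printf("%d\n", toy_snad_comp(&older, &newer));	/* prints -1 */
	return 0;
}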
include/jemalloc/internal/arena.h
@@ -130,7 +130,8 @@ struct arena_bin_s {
 	/*
 	 * Heap of non-full slabs.  This heap is used to assure that new
-	 * allocations come from the non-full slab that is lowest in memory.
+	 * allocations come from the non-full slab that is oldest/lowest in
+	 * memory.
 	 */
 	extent_heap_t	slabs_nonfull;
@@ -184,6 +185,9 @@ struct arena_s {
 	 */
 	size_t	offset_state;
 
+	/* Extent serial number generator state. */
+	size_t	extent_sn_next;
+
 	dss_prec_t	dss_prec;
 
 	/* True if a thread is currently executing arena_purge_to_limit(). */
@@ -224,8 +228,8 @@ struct arena_s {
 
 	/* User-configurable extent hook functions. */
 	union {
-		extent_hooks_t	*extent_hooks;
-		void	*extent_hooks_pun;
+		extent_hooks_t	*extent_hooks;
+		void	*extent_hooks_pun;
 	};
 
 	/* Cache of extent structures that were allocated via base_alloc(). */
@@ -320,6 +324,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
 void arena_boot(void);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);

include/jemalloc/internal/extent.h
@@ -26,6 +26,20 @@ struct extent_s {
 	 */
 	size_t	e_usize;
 
+	/*
+	 * Serial number (potentially non-unique).
+	 *
+	 * In principle serial numbers can wrap around on 32-bit systems if
+	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+	 * back on address comparison for equal serial numbers, stable (if
+	 * imperfect) ordering is maintained.
+	 *
+	 * Serial numbers may not be unique even in the absence of wrap-around,
+	 * e.g. when splitting an extent and assigning the same serial number to
+	 * both resulting adjacent extents.
+	 */
+	size_t	e_sn;
+
 	/* True if extent is active (in use). */
 	bool	e_active;
@@ -66,7 +80,7 @@ struct extent_s {
 	qr(extent_t)	qr_link;
 
 	union {
-		/* Linkage for per size class address-ordered heaps. */
+		/* Linkage for per size class sn/address-ordered heaps. */
 		phn(extent_t)	ph_link;
 
 		/* Linkage for arena's large and extent_cache lists. */
@@ -144,6 +158,7 @@ size_t extent_usize_get(const extent_t *extent);
 void	*extent_before_get(const extent_t *extent);
 void	*extent_last_get(const extent_t *extent);
 void	*extent_past_get(const extent_t *extent);
+size_t	extent_sn_get(const extent_t *extent);
 bool	extent_active_get(const extent_t *extent);
 bool	extent_retained_get(const extent_t *extent);
 bool	extent_zeroed_get(const extent_t *extent);
@@ -157,16 +172,20 @@ void extent_addr_set(extent_t *extent, void *addr);
 void	extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
 void	extent_size_set(extent_t *extent, size_t size);
 void	extent_usize_set(extent_t *extent, size_t usize);
+void	extent_sn_set(extent_t *extent, size_t sn);
 void	extent_active_set(extent_t *extent, bool active);
 void	extent_zeroed_set(extent_t *extent, bool zeroed);
 void	extent_committed_set(extent_t *extent, bool committed);
 void	extent_slab_set(extent_t *extent, bool slab);
 void	extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
 void	extent_init(extent_t *extent, arena_t *arena, void *addr,
-    size_t size, size_t usize, bool active, bool zeroed, bool committed,
-    bool slab);
+    size_t size, size_t usize, size_t sn, bool active, bool zeroed,
+    bool committed, bool slab);
 void	extent_ring_insert(extent_t *sentinel, extent_t *extent);
 void	extent_ring_remove(extent_t *extent);
+int	extent_sn_comp(const extent_t *a, const extent_t *b);
+int	extent_ad_comp(const extent_t *a, const extent_t *b);
+int	extent_snad_comp(const extent_t *a, const extent_t *b);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -243,6 +262,13 @@ extent_past_get(const extent_t *extent)
 	    extent_size_get(extent)));
 }
 
+JEMALLOC_INLINE size_t
+extent_sn_get(const extent_t *extent)
+{
+
+	return (extent->e_sn);
+}
+
 JEMALLOC_INLINE bool
 extent_active_get(const extent_t *extent)
 {
@@ -351,6 +377,13 @@ extent_usize_set(extent_t *extent, size_t usize)
 	extent->e_usize = usize;
 }
 
+JEMALLOC_INLINE void
+extent_sn_set(extent_t *extent, size_t sn)
+{
+
+	extent->e_sn = sn;
+}
+
 JEMALLOC_INLINE void
 extent_active_set(extent_t *extent, bool active)
 {
@@ -388,7 +421,8 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
 
 JEMALLOC_INLINE void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
-    size_t usize, bool active, bool zeroed, bool committed, bool slab)
+    size_t usize, size_t sn, bool active, bool zeroed, bool committed,
+    bool slab)
 {
 
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
@@ -397,6 +431,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 	extent_addr_set(extent, addr);
 	extent_size_set(extent, size);
 	extent_usize_set(extent, usize);
+	extent_sn_set(extent, sn);
 	extent_active_set(extent, active);
 	extent_zeroed_set(extent, zeroed);
 	extent_committed_set(extent, committed);
@@ -419,6 +454,37 @@ extent_ring_remove(extent_t *extent)
 
 	qr_remove(extent, qr_link);
 }
+
+JEMALLOC_INLINE int
+extent_sn_comp(const extent_t *a, const extent_t *b)
+{
+	size_t a_sn = extent_sn_get(a);
+	size_t b_sn = extent_sn_get(b);
+
+	return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE int
+extent_ad_comp(const extent_t *a, const extent_t *b)
+{
+	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+
+	return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+JEMALLOC_INLINE int
+extent_snad_comp(const extent_t *a, const extent_t *b)
+{
+	int ret;
+
+	ret = extent_sn_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = extent_ad_comp(a, b);
+	return (ret);
+}
 #endif
 
 #endif /* JEMALLOC_H_INLINES */

include/jemalloc/internal/private_symbols.txt
@@ -31,6 +31,7 @@ arena_extent_cache_maybe_remove
 arena_extent_dalloc_large
 arena_extent_ralloc_large_expand
 arena_extent_ralloc_large_shrink
+arena_extent_sn_next
 arena_get
 arena_ichoose
 arena_init
@@ -132,6 +133,7 @@ decay_ticker_get
 dss_prec_names
 extent_active_get
 extent_active_set
+extent_ad_comp
 extent_addr_get
 extent_addr_randomize
 extent_addr_set
@@ -188,6 +190,10 @@ extent_slab_data_get
 extent_slab_data_get_const
 extent_slab_get
 extent_slab_set
+extent_sn_comp
+extent_sn_get
+extent_sn_set
+extent_snad_comp
 extent_split_wrapper
 extent_usize_get
 extent_usize_set

src/arena.c
@@ -760,7 +760,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
 		size_t ndirty = arena_dirty_count(tsdn, arena);
 		assert(ndirty == arena->ndirty);
 	}
-	extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, false, false,
+	extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, 0, false, false,
 	    false, false);
 
 	npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
@@ -1351,12 +1351,12 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 	assert(extent_slab_data_get(slab)->nfree > 0);
 
 	/*
-	 * Make sure that if bin->slabcur is non-NULL, it refers to the lowest
-	 * non-full slab.  It is okay to NULL slabcur out rather than
-	 * proactively keeping it pointing at the lowest non-full slab.
+	 * Make sure that if bin->slabcur is non-NULL, it refers to the
+	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
+	 * than proactively keeping it pointing at the oldest/lowest non-full
+	 * slab.
 	 */
-	if (bin->slabcur != NULL && (uintptr_t)extent_addr_get(slab) <
-	    (uintptr_t)extent_addr_get(bin->slabcur)) {
+	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
 		/* Switch slabcur. */
 		if (extent_slab_data_get(bin->slabcur)->nfree > 0)
 			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
@@ -1651,6 +1651,13 @@ arena_nthreads_dec(arena_t *arena, bool internal)
 	atomic_sub_u(&arena->nthreads[internal], 1);
 }
 
+size_t
+arena_extent_sn_next(arena_t *arena)
+{
+
+	return (atomic_add_zu(&arena->extent_sn_next, 1) - 1);
+}
+
 arena_t *
 arena_new(tsdn_t *tsdn, unsigned ind)
 {
@@ -1684,6 +1691,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 		    (size_t)(uintptr_t)arena;
 	}
 
+	arena->extent_sn_next = 0;
+
 	arena->dss_prec = extent_dss_prec_get();
 
 	arena->purging = false;
@@ -1702,7 +1711,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 		extent_heap_new(&arena->extents_retained[i]);
 	}
 
-	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
+	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, 0, false, false,
 	    false, false);
 
 	if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
@@ -1724,8 +1733,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 			return (NULL);
 		bin->slabcur = NULL;
 		extent_heap_new(&bin->slabs_nonfull);
-		extent_init(&bin->slabs_full, arena, NULL, 0, 0, false, false,
-		    false, false);
+		extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
+		    false, false, false);
 		if (config_stats)
 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}

src/base.c
@@ -5,6 +5,7 @@
 /* Data. */
 
 static malloc_mutex_t	base_mtx;
+static size_t		base_extent_sn_next;
 static extent_heap_t	base_avail[NSIZES];
 static extent_t		*base_extents;
 static size_t		base_allocated;
@@ -37,6 +38,14 @@ base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
 	base_extents = extent;
 }
 
+static void
+base_extent_init(extent_t *extent, void *addr, size_t size)
+{
+	size_t sn = atomic_add_zu(&base_extent_sn_next, 1) - 1;
+
+	extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
+}
+
 static extent_t *
 base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 {
@@ -74,7 +83,7 @@ base_extent_alloc(tsdn_t *tsdn, size_t minsize)
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_init(extent, NULL, addr, esize, 0, true, true, true, false);
+	base_extent_init(extent, addr, esize);
 	return (extent);
 }
 
@@ -164,6 +173,7 @@ base_boot(void)
 
 	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
 		return (true);
+	base_extent_sn_next = 0;
 	for (i = 0; i < NSIZES; i++)
 		extent_heap_new(&base_avail[i]);
 	base_extents = NULL;

src/extent.c
@@ -177,17 +177,8 @@ extent_size_quantize_t *extent_size_quantize_ceil =
     JEMALLOC_N(n_extent_size_quantize_ceil);
 #endif
 
-JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_t *a, const extent_t *b)
-{
-	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
-	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
-	return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
 /* Generate pairing heap functions. */
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
+ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
 
 static void
 extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
@@ -353,8 +344,8 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent)
 }
 
 /*
- * Do first-best-fit extent selection, i.e. select the lowest extent that best
- * fits.
+ * Do first-best-fit extent selection, i.e. select the oldest/lowest extent that
+ * best fits.
  */
 static extent_t *
 extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
@@ -708,7 +699,8 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
 		extent_dalloc(tsdn, arena, extent);
 		return (NULL);
 	}
-	extent_init(extent, arena, addr, size, usize, true, zero, commit, slab);
+	extent_init(extent, arena, addr, size, usize,
+	    arena_extent_sn_next(arena), true, zero, commit, slab);
 	if (pad != 0)
 		extent_addr_randomize(tsdn, extent, alignment);
 	if (extent_register(tsdn, extent)) {
@@ -1036,7 +1028,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
 		extent_t lead;
 
 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
-		    usize_a, extent_active_get(extent),
+		    usize_a, extent_sn_get(extent), extent_active_get(extent),
 		    extent_zeroed_get(extent), extent_committed_get(extent),
 		    extent_slab_get(extent));
 
@@ -1046,9 +1038,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
 	}
 
 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
-	    size_a), size_b, usize_b, extent_active_get(extent),
-	    extent_zeroed_get(extent), extent_committed_get(extent),
-	    extent_slab_get(extent));
+	    size_a), size_b, usize_b, extent_sn_get(extent),
+	    extent_active_get(extent), extent_zeroed_get(extent),
+	    extent_committed_get(extent), extent_slab_get(extent));
 	if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
 	    &trail_elm_a, &trail_elm_b))
 		goto label_error_c;
@@ -1145,6 +1137,8 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
 
 	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
 	extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
+	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
+	    extent_sn_get(a) : extent_sn_get(b));
 	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
 
 	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);

src/extent_dss.c
@@ -142,7 +142,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			gap_size = (uintptr_t)ret - (uintptr_t)gap_addr;
 			if (gap_size != 0) {
 				extent_init(gap, arena, gap_addr, gap_size,
-				    gap_size, false, false, true, false);
+				    gap_size, arena_extent_sn_next(arena),
+				    false, false, true, false);
 			}
 			dss_next = (void *)((uintptr_t)ret + size);
 			if ((uintptr_t)ret < (uintptr_t)max_cur ||