Track extent structure serial number (esn) in extent_t.

This enables stable sorting of extent_t structures.
Jason Evans 2017-04-16 21:51:26 -07:00
parent 69aa552809
commit 76b35f4b2f
7 changed files with 121 additions and 48 deletions
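Why a per-structure serial number helps with stable sorting: extent_t structures are recycled, so their addresses and the properties of the extents they currently describe are not stable keys, whereas an esn assigned once when the structure is carved out of a base block never changes. Orderings that fall back to it are therefore deterministic across runs and reuse. A minimal standalone sketch of that tie-breaking idea (fake_extent_t, fake_extent_comp, and the choice of size as the primary key are invented for illustration; this is not jemalloc's comparator):

/*
 * Standalone illustration: break ties with a per-structure serial number,
 * yielding a deterministic total order even when the primary key is equal.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	size_t size; /* primary key */
	size_t esn;  /* serial number assigned once, when the structure was created */
} fake_extent_t;

static int
fake_extent_comp(const void *a_, const void *b_) {
	const fake_extent_t *a = a_, *b = b_;
	if (a->size != b->size) {
		return (a->size < b->size) ? -1 : 1;
	}
	/* Tie-break on the structure serial number for stability. */
	return (a->esn < b->esn) ? -1 : (a->esn > b->esn);
}

int
main(void) {
	fake_extent_t v[] = {{4096, 2}, {4096, 0}, {8192, 1}};
	qsort(v, 3, sizeof(v[0]), fake_extent_comp);
	for (size_t i = 0; i < 3; i++) {
		printf("size=%zu esn=%zu\n", v[i].size, v[i].esn);
	}
	return 0;
}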

include/jemalloc/internal/base_externs.h

@@ -1,18 +1,19 @@
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
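The new base_alloc_extent() declaration gives the base allocator a typed entry point for extent_t structures, so it can stamp the serial number of the backing block into the new structure (via the esn out-parameter added to the shared implementation in src/base.c below). A standalone sketch of that wrapper pattern, with all names invented and a simple counter standing in for the serial number jemalloc takes from the backing extent:

/*
 * Standalone sketch (names invented): a pool with a raw allocation path and
 * a typed one that stamps a serial number into the returned structure.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	size_t serial; /* stamped by the typed allocation path */
} node_t;

typedef struct {
	size_t next_serial; /* stand-in for the backing block's serial number */
} pool_t;

static void *
pool_alloc_raw(pool_t *pool, size_t size, size_t *serial_out) {
	void *ret = calloc(1, size);
	if (ret != NULL && serial_out != NULL) {
		*serial_out = pool->next_serial++;
	}
	return ret;
}

static node_t *
pool_alloc_node(pool_t *pool) {
	size_t serial;
	node_t *node = pool_alloc_raw(pool, sizeof(node_t), &serial);
	if (node == NULL) {
		return NULL;
	}
	node->serial = serial; /* analogous to extent_esn_set() */
	return node;
}

int
main(void) {
	pool_t pool = {0};
	node_t *a = pool_alloc_node(&pool);
	node_t *b = pool_alloc_node(&pool);
	if (a == NULL || b == NULL) {
		return 1;
	}
	printf("%zu %zu\n", a->serial, b->serial); /* 0 1 */
	free(a);
	free(b);
	return 0;
}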

include/jemalloc/internal/extent_inlines.h

@@ -17,6 +17,8 @@ unsigned extent_nfree_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
size_t extent_esn_get(const extent_t *extent);
size_t extent_bsize_get(const extent_t *extent);
void *extent_before_get(const extent_t *extent);
void *extent_last_get(const extent_t *extent);
void *extent_past_get(const extent_t *extent);
@@ -27,6 +29,8 @@ void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_esn_set(extent_t *extent, size_t esn);
void extent_bsize_set(extent_t *extent, size_t bsize);
void extent_szind_set(extent_t *extent, szind_t szind);
void extent_nfree_set(extent_t *extent, unsigned nfree);
void extent_nfree_inc(extent_t *extent);
@@ -40,6 +44,7 @@ void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed);
void extent_binit(extent_t *extent, void *addr, size_t size, size_t sn);
void extent_list_init(extent_list_t *list);
extent_t *extent_list_first(const extent_list_t *list);
extent_t *extent_list_last(const extent_list_t *list);
@@ -141,7 +146,17 @@ extent_addr_get(const extent_t *extent) {
JEMALLOC_INLINE size_t
extent_size_get(const extent_t *extent) {
return extent->e_size;
return (extent->e_size_esn & EXTENT_SIZE_MASK);
}
JEMALLOC_INLINE size_t
extent_esn_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_ESN_MASK);
}
JEMALLOC_INLINE size_t
extent_bsize_get(const extent_t *extent) {
return extent->e_bsize;
}
JEMALLOC_INLINE void *
@@ -213,7 +228,19 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
JEMALLOC_INLINE void
extent_size_set(extent_t *extent, size_t size) {
extent->e_size = size;
assert((size & ~EXTENT_SIZE_MASK) == 0);
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}
JEMALLOC_INLINE void
extent_esn_set(extent_t *extent, size_t esn) {
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
EXTENT_ESN_MASK);
}
JEMALLOC_INLINE void
extent_bsize_set(extent_t *extent, size_t bsize) {
extent->e_bsize = bsize;
}
JEMALLOC_INLINE void
@@ -298,6 +325,19 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
}
}
JEMALLOC_INLINE void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_arena_set(extent, NULL);
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
}
JEMALLOC_INLINE void
extent_list_init(extent_list_t *list) {
ql_new(list);

include/jemalloc/internal/extent_structs.h

@@ -23,8 +23,8 @@ struct extent_s {
* z: zeroed
* t: state
* i: szind
* n: sn
* f: nfree
* n: sn
*
* nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
*
@@ -102,8 +102,20 @@ struct extent_s {
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
/* Extent size. */
size_t e_size;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
/*
* List linkage, used by a variety of lists:
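Because regular extent sizes are always multiples of the page size, the low lg(PAGE) bits of e_size_esn are otherwise zero, which is where the structure's serial number lives; EXTENT_SIZE_MASK and EXTENT_ESN_MASK split the word, and extent_size_set() asserts that the size has no low bits set. Base extent structures, whose block sizes need not be page multiples, use the overlapping e_bsize field instead. A standalone sketch of the packing (PAGE is hard-coded to 4096 here for illustration; jemalloc derives it from the platform at build time):

/*
 * Standalone sketch of the size/esn packing used by e_size_esn.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE             ((size_t)4096)
#define EXTENT_SIZE_MASK ((size_t)~(PAGE - 1))
#define EXTENT_ESN_MASK  ((size_t)PAGE - 1)

int
main(void) {
	size_t e_size_esn = 0;

	/* Store a page-multiple size; the low bits stay available. */
	size_t size = 3 * PAGE;
	assert((size & ~EXTENT_SIZE_MASK) == 0);
	e_size_esn = size | (e_size_esn & ~EXTENT_SIZE_MASK);

	/* Store a small serial number in the low lg(PAGE) bits. */
	size_t esn = 42;
	e_size_esn = (e_size_esn & ~EXTENT_ESN_MASK) | (esn & EXTENT_ESN_MASK);

	/* Unpack both fields, mirroring extent_size_get()/extent_esn_get(). */
	printf("size=%zu esn=%zu\n", e_size_esn & EXTENT_SIZE_MASK,
	    e_size_esn & EXTENT_ESN_MASK);
	return 0;
}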

include/jemalloc/internal/private_symbols.txt

@@ -86,6 +86,7 @@ b0get
base_alloc
base_boot
base_delete
base_extent_alloc
base_extent_hooks_get
base_extent_hooks_set
base_ind_get
@@ -143,6 +144,9 @@ extent_arena_set
extent_base_get
extent_before_get
extent_boot
extent_binit
extent_bsize_get
extent_bsize_set
extent_commit_wrapper
extent_committed_get
extent_committed_set
@@ -156,6 +160,8 @@ extent_dss_boot
extent_dss_mergeable
extent_dss_prec_get
extent_dss_prec_set
extent_esn_get
extent_esn_set
extent_heap_empty
extent_heap_first
extent_heap_insert

src/base.c

@@ -88,8 +88,7 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_init(extent, NULL, addr, size, false, NSIZES, sn,
extent_state_active, true, true);
extent_binit(extent, addr, size, sn);
}
static void *
@@ -103,23 +102,22 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_size_get(extent) >= *gap_size + size);
extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_size_get(extent) - *gap_size - size,
false, NSIZES, extent_sn_get(extent), extent_state_active, true,
true);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
return ret;
}
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
size_t gap_size, void *addr, size_t size) {
if (extent_size_get(extent) > 0) {
if (extent_bsize_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor = size2index(extent_size_get(extent) + 1) -
szind_t index_floor = size2index(extent_bsize_get(extent) + 1) -
1;
extent_heap_insert(&base->avail[index_floor], extent);
}
@@ -286,28 +284,16 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
return old_extent_hooks;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
void *ret;
size_t usize, asize;
szind_t i;
extent_t *extent;
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t *esn) {
alignment = QUANTUM_CEILING(alignment);
usize = ALIGNMENT_CEILING(size, alignment);
asize = usize + alignment - QUANTUM;
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent = NULL;
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (i = size2index(asize); i < NSIZES; i++) {
for (szind_t i = size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
@@ -318,17 +304,46 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
CACHELINE, &esn);
if (extent == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped) {
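In the refactored base_alloc_impl() above, the request is rounded up so that distinct allocations do not share cache lines: usize is the size rounded up to the alignment, and asize adds worst-case padding (alignment - QUANTUM) so that a usize-sized, alignment-aligned region can always be carved from a QUANTUM-aligned block. A standalone sketch of that arithmetic (QUANTUM is assumed to be 16 here, and ALIGNMENT_CEILING is re-implemented locally; jemalloc defines both per platform):

/*
 * Standalone sketch of the size rounding performed by base_alloc_impl().
 */
#include <stddef.h>
#include <stdio.h>

#define QUANTUM ((size_t)16)

/* Round s up to the nearest multiple of the power-of-two alignment. */
static size_t
alignment_ceiling(size_t s, size_t alignment) {
	return (s + alignment - 1) & ~(alignment - 1);
}

int
main(void) {
	size_t size = 100, alignment = 64;

	/* usize: request rounded up to the alignment, avoiding false sharing. */
	size_t usize = alignment_ceiling(size, alignment);
	/*
	 * asize: worst-case space needed so that a usize-sized, alignment-aligned
	 * region can be carved out of a QUANTUM-aligned extent.
	 */
	size_t asize = usize + alignment - QUANTUM;

	printf("usize=%zu asize=%zu\n", usize, asize); /* 128 176 */
	return 0;
}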

src/extent.c

@@ -98,8 +98,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) {
extent = extent_list_last(&arena->extent_freelist);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);
return base_alloc(tsdn, arena->base, sizeof(extent_t),
CACHELINE);
return base_alloc_extent(tsdn, arena->base);
}
extent_list_remove(&arena->extent_freelist, extent);
malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);

test/unit/base.c

@@ -154,10 +154,10 @@ TEST_BEGIN(test_base_hooks_not_null) {
* that the first block's remaining space is considered for subsequent
* allocation.
*/
assert_zu_ge(extent_size_get(&base->blocks->extent), QUANTUM,
assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
"Remainder insufficient for test");
/* Use up all but one quantum of block. */
while (extent_size_get(&base->blocks->extent) > QUANTUM) {
while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
assert_ptr_not_null(p, "Unexpected base_alloc() failure");
}