Edata: rename "ranged" bit to "pai".

This better represents its intended purpose; the hugepage allocator design
evolved away from needing contiguity of hugepage virtual address space.
Author:    David Goldblatt, 2020-07-09 18:07:17 -07:00
Committer: David Goldblatt
Parent:    7ad2f78663
Commit:    e034500698

6 changed files with 48 additions and 38 deletions


@@ -26,6 +26,16 @@ enum extent_head_state_e {
 };
 typedef enum extent_head_state_e extent_head_state_t;
 
+/*
+ * Which implementation of the page allocator interface, (PAI, defined in
+ * pai.h) owns the given extent?
+ */
+enum extent_pai_e {
+	EXTENT_PAI_PAC = 0,
+	EXTENT_PAI_HPA = 1
+};
+typedef enum extent_pai_e extent_pai_t;
+
 struct e_prof_info_s {
 	/* Time when this was allocated. */
 	nstime_t e_prof_alloc_time;
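
For orientation, the two enumerators name the two page allocator implementations: PAC ("page allocator classic") and HPA (the hugepage allocator). The standalone C sketch below shows the kind of dispatch the new flag is meant to enable; the enum mirrors extent_pai_e above, while route_dalloc and its printf bodies are purely illustrative and not part of this commit.

#include <stdio.h>

/*
 * Standalone sketch: branch on which page allocator interface owns an
 * extent.  The enum mirrors extent_pai_e above; route_dalloc and the
 * printf bodies are illustrative stand-ins, not jemalloc code.
 */
typedef enum { EXTENT_PAI_PAC = 0, EXTENT_PAI_HPA = 1 } extent_pai_t;

static void
route_dalloc(extent_pai_t pai) {
	switch (pai) {
	case EXTENT_PAI_PAC:
		printf("return extent via the classic page allocator path\n");
		break;
	case EXTENT_PAI_HPA:
		printf("return extent via the hugepage allocator path\n");
		break;
	}
}

int
main(void) {
	route_dalloc(EXTENT_PAI_PAC);
	route_dalloc(EXTENT_PAI_HPA);
	return 0;
}
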
@@ -68,7 +78,7 @@ struct edata_s {
 	 * a: arena_ind
 	 * b: slab
 	 * c: committed
-	 * r: ranged
+	 * p: pai
 	 * z: zeroed
 	 * t: state
 	 * i: szind
@@ -76,7 +86,7 @@ struct edata_s {
 	 * s: bin_shard
 	 * n: sn
 	 *
-	 * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zrcbaaaa aaaaaaaa
+	 * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zpcbaaaa aaaaaaaa
 	 *
 	 * arena_ind: Arena from which this extent came, or all 1 bits if
 	 *            unassociated.
@@ -91,10 +101,7 @@ struct edata_s {
 	 *            as on a system that overcommits and satisfies physical
 	 *            memory needs on demand via soft page faults.
 	 *
-	 * ranged: Whether or not this extent is currently owned by the range
-	 *         allocator. This may be false even if the extent originally
-	 *         came from a range allocator; this indicates its *current*
-	 *         owner, not its original owner.
+	 * pai: The pai flag is an extent_pai_t.
 	 *
 	 * zeroed: The zeroed flag is used by extent recycling code to track
 	 *         whether memory is zero-filled.
@@ -136,12 +143,12 @@ struct edata_s {
 #define EDATA_BITS_COMMITTED_SHIFT  (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
 #define EDATA_BITS_COMMITTED_MASK  MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
 
-#define EDATA_BITS_RANGED_WIDTH  1
-#define EDATA_BITS_RANGED_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
-#define EDATA_BITS_RANGED_MASK  MASK(EDATA_BITS_RANGED_WIDTH, EDATA_BITS_RANGED_SHIFT)
+#define EDATA_BITS_PAI_WIDTH  1
+#define EDATA_BITS_PAI_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_PAI_MASK  MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
 
 #define EDATA_BITS_ZEROED_WIDTH  1
-#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_RANGED_WIDTH + EDATA_BITS_RANGED_SHIFT)
+#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
 #define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
 
 #define EDATA_BITS_STATE_WIDTH  2
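
To make the mask arithmetic concrete, here is a minimal, self-contained C sketch of the same one-bit packing scheme. The DEMO_* names, the local MASK macro, and the shift value are assumptions chosen for illustration; only the pattern (width/shift defines, a mask built from them, set/get via shift and mask) follows the EDATA_BITS_* and edata_pai_get/set code in this commit.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Build a mask of `width` one-bits starting at bit `shift`. */
#define MASK(width, shift)	((((uint64_t)1 << (width)) - 1) << (shift))

/* Illustrative positions only; the real EDATA_BITS_* shifts differ. */
#define DEMO_PAI_WIDTH	1
#define DEMO_PAI_SHIFT	14
#define DEMO_PAI_MASK	MASK(DEMO_PAI_WIDTH, DEMO_PAI_SHIFT)

typedef enum { DEMO_PAI_PAC = 0, DEMO_PAI_HPA = 1 } demo_pai_t;

/* Mirrors the edata_pai_set() pattern: clear the field, then or in the value. */
static void
demo_pai_set(uint64_t *bits, demo_pai_t pai) {
	*bits = (*bits & ~DEMO_PAI_MASK) | ((uint64_t)pai << DEMO_PAI_SHIFT);
}

/* Mirrors the edata_pai_get() pattern: mask the field, then shift it down. */
static demo_pai_t
demo_pai_get(uint64_t bits) {
	return (demo_pai_t)((bits & DEMO_PAI_MASK) >> DEMO_PAI_SHIFT);
}

int
main(void) {
	uint64_t bits = 0;
	demo_pai_set(&bits, DEMO_PAI_HPA);
	assert(demo_pai_get(bits) == DEMO_PAI_HPA);
	demo_pai_set(&bits, DEMO_PAI_PAC);
	printf("pai bit = %d\n", (int)demo_pai_get(bits));
	return 0;
}
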
@@ -291,10 +298,10 @@ edata_committed_get(const edata_t *edata) {
 	    EDATA_BITS_COMMITTED_SHIFT);
 }
 
-static inline bool
-edata_ranged_get(const edata_t *edata) {
-	return (bool)((edata->e_bits & EDATA_BITS_RANGED_MASK) >>
-	    EDATA_BITS_RANGED_SHIFT);
+static inline extent_pai_t
+edata_pai_get(const edata_t *edata) {
+	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
+	    EDATA_BITS_PAI_SHIFT);
 }
 
 static inline bool
@@ -488,9 +495,9 @@ edata_committed_set(edata_t *edata, bool committed) {
 }
 
 static inline void
-edata_ranged_set(edata_t *edata, bool ranged) {
-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_RANGED_MASK) |
-	    ((uint64_t)ranged << EDATA_BITS_RANGED_SHIFT);
+edata_pai_set(edata_t *edata, extent_pai_t pai) {
+	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
+	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
 }
 
 static inline void
@@ -538,9 +545,8 @@ edata_is_head_set(edata_t *edata, bool is_head) {
 static inline void
 edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
-    bool committed, bool ranged, extent_head_state_t is_head) {
+    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
-	assert(ranged == false);
 
 	edata_arena_ind_set(edata, arena_ind);
 	edata_addr_set(edata, addr);
@@ -551,7 +557,7 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
 	edata_state_set(edata, state);
 	edata_zeroed_set(edata, zeroed);
 	edata_committed_set(edata, committed);
-	edata_ranged_set(edata, ranged);
+	edata_pai_set(edata, pai);
 	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
 	if (config_prof) {
 		edata_prof_tctx_set(edata, NULL);
@@ -569,7 +575,12 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
 	edata_state_set(edata, extent_state_active);
 	edata_zeroed_set(edata, true);
 	edata_committed_set(edata, true);
-	edata_ranged_set(edata, false);
+	/*
+	 * This isn't strictly true, but base allocated extents never get
+	 * deallocated and can't be looked up in the emap, but no sense in
+	 * wasting a state bit to encode this fact.
+	 */
+	edata_pai_set(edata, EXTENT_PAI_PAC);
 }
 
 static inline int


@@ -249,7 +249,7 @@ emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
 	 */
 	edata_t lead = {0};
 	edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
-	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 
 	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
 	    &prepare->lead_elm_a, &prepare->lead_elm_b);


@@ -86,7 +86,7 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 	bool commit = true;
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, new_addr,
 	    size, alignment, zero, &commit, false);
-	assert(edata == NULL || !edata_ranged_get(edata));
+	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
 	return edata;
 }
@@ -115,7 +115,7 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 		    size, alignment, zero, &commit);
 	}
 
-	assert(edata == NULL || !edata_ranged_get(edata));
+	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
 	return edata;
 }
@@ -124,7 +124,7 @@ ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *edata) {
 	assert(edata_base_get(edata) != NULL);
 	assert(edata_size_get(edata) != 0);
-	assert(!edata_ranged_get(edata));
+	assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
@@ -650,7 +650,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
 	    alloc_size, false, SC_NSIZES, extent_sn_next(pac),
-	    extent_state_active, zeroed, committed, /* ranged */ false,
+	    extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
 	    EXTENT_IS_HEAD);
 
 	if (extent_register_no_gdump_add(tsdn, pac, edata)) {
@@ -790,7 +790,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	}
 	edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
 	    size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
-	    extent_state_active, zero, *commit, /* ranged */ false,
+	    extent_state_active, zero, *commit, EXTENT_PAI_PAC,
 	    EXTENT_NOT_HEAD);
 	if (extent_register(tsdn, pac, edata)) {
 		edata_cache_put(tsdn, pac->edata_cache, edata);
@@ -1026,7 +1026,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 void
 extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *edata) {
-	assert(!edata_ranged_get(edata));
+	assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
@@ -1180,8 +1180,7 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	    (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
 	    /* slab */ false, SC_NSIZES, edata_sn_get(edata),
 	    edata_state_get(edata), edata_zeroed_get(edata),
-	    edata_committed_get(edata), edata_ranged_get(edata),
-	    EXTENT_NOT_HEAD);
+	    edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 
 	emap_prepare_t prepare;
 	bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
 	    size_a, trail, size_b);


@@ -156,8 +156,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				    gap_addr_page, gap_size_page, false,
 				    SC_NSIZES, extent_sn_next(
 				    &arena->pa_shard.pac),
-				    extent_state_active, false, true, false,
-				    EXTENT_NOT_HEAD);
+				    extent_state_active, false, true,
+				    EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 			}
 			/*
 			 * Compute the address just past the end of the desired
@@ -206,7 +206,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				    arena_ind_get(arena), ret, size,
 				    size, false, SC_NSIZES,
 				    extent_state_active, false, true,
-				    false, EXTENT_NOT_HEAD);
+				    EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 				if (extent_purge_forced_wrapper(tsdn,
 				    ehooks, &edata, 0, size)) {
 					memset(ret, 0, size);


@@ -36,9 +36,9 @@ TEST_BEGIN(test_rtree_extrema) {
 	edata_t edata_a = {0}, edata_b = {0};
 	edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
 	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
-	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 	edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 
 	tsdn_t *tsdn = tsdn_fetch();
@@ -93,7 +93,7 @@ TEST_BEGIN(test_rtree_bits) {
 	edata_t edata = {0};
 	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
@@ -143,7 +143,7 @@ TEST_BEGIN(test_rtree_random) {
 	edata_t edata = {0};
 	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 
 	expect_false(rtree_new(rtree, base, false),
 	    "Unexpected rtree_new() failure");


@@ -12,7 +12,7 @@ TEST_BEGIN(test_arena_slab_regind) {
 	edata_init(&slab, INVALID_ARENA_IND,
 	    mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
 	    bin_info->slab_size, true,
-	    binind, 0, extent_state_active, false, true, false,
+	    binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC,
 	    EXTENT_NOT_HEAD);
 	expect_ptr_not_null(edata_addr_get(&slab),
 	    "Unexpected malloc() failure");