Edata: Add a ranged bit.
We steal the dumpable bit, which we ended up not needing.
parent bd4fdf295e
commit 12eb888e54
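Before the per-file hunks: the new flag lives in the packed e_bits word, using the same WIDTH/SHIFT/MASK scheme as every other field there. The following is a minimal self-contained sketch of that pattern, not code from this commit; the MASK expansion and the bit position (bit 14, read off the "zrcbaaaa aaaaaaaa" diagram below: 12 arena bits, then slab, committed, ranged) are assumptions for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed expansion: a width-bit-wide mask positioned at shift. */
#define MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))

/* Toy layout: 12 arena bits, then slab, committed, ranged => bit 14. */
#define BITS_RANGED_WIDTH 1
#define BITS_RANGED_SHIFT 14
#define BITS_RANGED_MASK MASK(BITS_RANGED_WIDTH, BITS_RANGED_SHIFT)

static inline bool
ranged_get(uint64_t e_bits) {
    /* Isolate the field, then shift it down to bit 0. */
    return (bool)((e_bits & BITS_RANGED_MASK) >> BITS_RANGED_SHIFT);
}

static inline uint64_t
ranged_set(uint64_t e_bits, bool ranged) {
    /* Clear the field, then or in the new value. */
    return (e_bits & ~BITS_RANGED_MASK) |
        ((uint64_t)ranged << BITS_RANGED_SHIFT);
}

int
main(void) {
    uint64_t bits = 0;
    bits = ranged_set(bits, true);
    assert(ranged_get(bits));
    bits = ranged_set(bits, false);
    assert(!ranged_get(bits));
    return 0;
}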
@@ -68,7 +68,7 @@ struct edata_s {
  * a: arena_ind
  * b: slab
  * c: committed
- * d: dumpable
+ * r: ranged
  * z: zeroed
  * t: state
  * i: szind
@@ -76,7 +76,7 @@ struct edata_s {
  * s: bin_shard
  * n: sn
  *
- * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
+ * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zrcbaaaa aaaaaaaa
  *
  * arena_ind: Arena from which this extent came, or all 1 bits if
  *            unassociated.
@@ -91,22 +91,10 @@ struct edata_s {
  *            as on a system that overcommits and satisfies physical
  *            memory needs on demand via soft page faults.
  *
- * dumpable: The dumpable flag indicates whether or not we've set the
- *           memory in question to be dumpable. Note that this
- *           interacts somewhat subtly with user-specified extent hooks,
- *           since we don't know if *they* are fiddling with
- *           dumpability (in which case, we don't want to undo whatever
- *           they're doing). To deal with this scenario, we:
- *           - Make dumpable false only for memory allocated with the
- *             default hooks.
- *           - Only allow memory to go from non-dumpable to dumpable,
- *             and only once.
- *           - Never make the OS call to allow dumping when the
- *             dumpable bit is already set.
- *           These three constraints mean that we will never
- *           accidentally dump user memory that the user meant to set
- *           nondumpable with their extent hooks.
- *
+ * ranged: Whether or not this extent is currently owned by the range
+ *         allocator. This may be false even if the extent originally
+ *         came from a range allocator; this indicates its *current*
+ *         owner, not its original owner.
  *
  * zeroed: The zeroed flag is used by extent recycling code to track
  *         whether memory is zero-filled.
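The new comment stresses that the bit records the extent's *current* owner, not its original one. A hypothetical sketch of what that implies for a range allocator handing extents back and forth; range_allocator_take/release are invented names for illustration, and only edata_ranged_get/edata_ranged_set (plus the assumed internal include path) come from this commit:

#include "jemalloc/internal/edata.h" /* assumed internal include path */

/* Hypothetical helpers, for illustration only. */
static void
range_allocator_take(edata_t *edata) {
    /* An extent is never range-owned twice over. */
    assert(!edata_ranged_get(edata));
    edata_ranged_set(edata, true);
}

static void
range_allocator_release(edata_t *edata) {
    /*
     * Once released, the bit goes false again, even though the extent
     * originally came from the range allocator.
     */
    assert(edata_ranged_get(edata));
    edata_ranged_set(edata, false);
}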
@@ -148,12 +136,12 @@ struct edata_s {
 #define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
 #define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
 
-#define EDATA_BITS_DUMPABLE_WIDTH 1
-#define EDATA_BITS_DUMPABLE_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
-#define EDATA_BITS_DUMPABLE_MASK MASK(EDATA_BITS_DUMPABLE_WIDTH, EDATA_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_RANGED_WIDTH 1
+#define EDATA_BITS_RANGED_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_RANGED_MASK MASK(EDATA_BITS_RANGED_WIDTH, EDATA_BITS_RANGED_SHIFT)
 
 #define EDATA_BITS_ZEROED_WIDTH 1
-#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_DUMPABLE_WIDTH + EDATA_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_RANGED_WIDTH + EDATA_BITS_RANGED_SHIFT)
 #define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
 
 #define EDATA_BITS_STATE_WIDTH 2
@@ -283,9 +271,9 @@ edata_committed_get(const edata_t *edata) {
 }
 
 static inline bool
-edata_dumpable_get(const edata_t *edata) {
-	return (bool)((edata->e_bits & EDATA_BITS_DUMPABLE_MASK) >>
-	    EDATA_BITS_DUMPABLE_SHIFT);
+edata_ranged_get(const edata_t *edata) {
+	return (bool)((edata->e_bits & EDATA_BITS_RANGED_MASK) >>
+	    EDATA_BITS_RANGED_SHIFT);
 }
 
 static inline bool
@@ -479,9 +467,9 @@ edata_committed_set(edata_t *edata, bool committed) {
 }
 
 static inline void
-edata_dumpable_set(edata_t *edata, bool dumpable) {
-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_DUMPABLE_MASK) |
-	    ((uint64_t)dumpable << EDATA_BITS_DUMPABLE_SHIFT);
+edata_ranged_set(edata_t *edata, bool ranged) {
+	edata->e_bits = (edata->e_bits & ~EDATA_BITS_RANGED_MASK) |
+	    ((uint64_t)ranged << EDATA_BITS_RANGED_SHIFT);
 }
 
 static inline void
@@ -522,8 +510,9 @@ edata_is_head_set(edata_t *edata, bool is_head) {
 static inline void
 edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
-    bool committed, bool dumpable, extent_head_state_t is_head) {
+    bool committed, bool ranged, extent_head_state_t is_head) {
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
+	assert(ranged == false);
 
 	edata_arena_ind_set(edata, arena_ind);
 	edata_addr_set(edata, addr);
@@ -534,7 +523,7 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
 	edata_state_set(edata, state);
 	edata_zeroed_set(edata, zeroed);
 	edata_committed_set(edata, committed);
-	edata_dumpable_set(edata, dumpable);
+	edata_ranged_set(edata, ranged);
 	ql_elm_new(edata, ql_link);
 	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
 	if (config_prof) {
@@ -553,7 +542,7 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
 	edata_state_set(edata, extent_state_active);
 	edata_zeroed_set(edata, true);
 	edata_committed_set(edata, true);
-	edata_dumpable_set(edata, true);
+	edata_ranged_set(edata, false);
 }
 
 static inline void
@@ -246,11 +246,11 @@ emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
 	    (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
 	    slab_b, szind_b, edata_sn_get(edata), edata_state_get(edata),
 	    edata_zeroed_get(edata), edata_committed_get(edata),
-	    edata_dumpable_get(edata), EXTENT_NOT_HEAD);
+	    edata_ranged_get(edata), EXTENT_NOT_HEAD);
 
 	/*
-	 * We use incorrect constants for things like arena ind, zero, dump, and
-	 * commit state, and head status. This is a fake edata_t, used to
+	 * We use incorrect constants for things like arena ind, zero, ranged,
+	 * and commit state, and head status. This is a fake edata_t, used to
 	 * facilitate a lookup.
 	 */
 	edata_t lead;
src/extent.c (14 changed lines)
@@ -80,7 +80,7 @@ ecache_alloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	bool commit = true;
 	edata_t *edata = extent_recycle(tsdn, shard, ehooks, ecache,
 	    new_addr, size, alignment, slab, szind, zero, &commit, false);
-	assert(edata == NULL || edata_dumpable_get(edata));
+	assert(edata == NULL || !edata_ranged_get(edata));
 	return edata;
 }
 
@@ -110,7 +110,7 @@ ecache_alloc_grow(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		    size, alignment, slab, szind, zero, &commit);
 	}
 
-	assert(edata == NULL || edata_dumpable_get(edata));
+	assert(edata == NULL || !edata_ranged_get(edata));
 	return edata;
 }
 
@@ -119,7 +119,7 @@ ecache_dalloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata) {
 	assert(edata_base_get(edata) != NULL);
 	assert(edata_size_get(edata) != 0);
-	assert(edata_dumpable_get(edata));
+	assert(!edata_ranged_get(edata));
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
@@ -661,7 +661,8 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 
 	edata_init(edata, ecache_ind_get(&shard->ecache_retained), ptr,
 	    alloc_size, false, SC_NSIZES, pa_shard_extent_sn_next(shard),
-	    extent_state_active, zeroed, committed, true, EXTENT_IS_HEAD);
+	    extent_state_active, zeroed, committed, /* ranged */ false,
+	    EXTENT_IS_HEAD);
 
 	if (extent_register_no_gdump_add(tsdn, edata)) {
 		edata_cache_put(tsdn, &shard->edata_cache, edata);
@@ -814,7 +815,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	}
 	edata_init(edata, ecache_ind_get(&shard->ecache_dirty), addr,
 	    size, slab, szind, pa_shard_extent_sn_next(shard),
-	    extent_state_active, *zero, *commit, true, EXTENT_NOT_HEAD);
+	    extent_state_active, *zero, *commit, /* ranged */ false,
+	    EXTENT_NOT_HEAD);
 	if (extent_register(tsdn, edata)) {
 		edata_cache_put(tsdn, &shard->edata_cache, edata);
 		return NULL;
@@ -1059,7 +1061,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 void
 extent_dalloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     edata_t *edata) {
-	assert(edata_dumpable_get(edata));
+	assert(!edata_ranged_get(edata));
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
@@ -157,7 +157,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				    gap_addr_page, gap_size_page, false,
 				    SC_NSIZES, pa_shard_extent_sn_next(
 				    &arena->pa_shard), extent_state_active,
-				    false, true, true, EXTENT_NOT_HEAD);
+				    false, true, false, EXTENT_NOT_HEAD);
 			}
 			/*
 			 * Compute the address just past the end of the desired
@@ -206,7 +206,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 					    arena_ind_get(arena), ret, size,
 					    size, false, SC_NSIZES,
 					    extent_state_active, false, true,
-					    true, EXTENT_NOT_HEAD);
+					    false, EXTENT_NOT_HEAD);
 					if (extent_purge_forced_wrapper(tsdn,
 					    ehooks, &edata, 0, size)) {
 						memset(ret, 0, size);
@@ -35,9 +35,9 @@ TEST_BEGIN(test_rtree_extrema) {
 	edata_t edata_a, edata_b;
 	edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
 	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
-	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
 	edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
 
 	tsdn_t *tsdn = tsdn_fetch();
 
@@ -80,7 +80,7 @@ TEST_BEGIN(test_rtree_bits) {
 
 	edata_t edata;
 	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
 
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
@@ -126,7 +126,7 @@ TEST_BEGIN(test_rtree_random) {
 
 	edata_t edata;
 	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+	    extent_state_active, false, false, false, EXTENT_NOT_HEAD);
 
 	expect_false(rtree_new(rtree, base, false),
 	    "Unexpected rtree_new() failure");
|
@ -12,7 +12,7 @@ TEST_BEGIN(test_arena_slab_regind) {
|
|||||||
edata_init(&slab, INVALID_ARENA_IND,
|
edata_init(&slab, INVALID_ARENA_IND,
|
||||||
mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
|
mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
|
||||||
bin_info->slab_size, true,
|
bin_info->slab_size, true,
|
||||||
binind, 0, extent_state_active, false, true, true,
|
binind, 0, extent_state_active, false, true, false,
|
||||||
EXTENT_NOT_HEAD);
|
EXTENT_NOT_HEAD);
|
||||||
expect_ptr_not_null(edata_addr_get(&slab),
|
expect_ptr_not_null(edata_addr_get(&slab),
|
||||||
"Unexpected malloc() failure");
|
"Unexpected malloc() failure");
|
||||||
|
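All of the call-site changes above follow one pattern: every existing edata_init caller now passes false for the new parameter, matching the assert(ranged == false) added to edata_init. A condensed caller-side sketch, mirroring the test hunks above (test scaffolding omitted):

edata_t edata;
/* ranged replaces dumpable as the argument between committed and is_head. */
edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
    extent_state_active, false, false, /* ranged */ false, EXTENT_NOT_HEAD);
assert(!edata_ranged_get(&edata));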