Add a "dumpable" bit to the extent state.

Currently, this is unused (i.e. all extents are always marked dumpable).  In the
future, we'll begin using this functionality.
commit d14bbf8d81 (parent bbaa72422b)
Author: David Goldblatt
Date: 2017-09-18 17:25:57 -07:00

6 changed files with 65 additions and 21 deletions
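This commit only adds and threads the bit; the OS-facing consumer arrives later. For context, here is a minimal standalone sketch (not code from this commit; the pairing with madvise() is an assumption about the eventual use) of the mechanism a dumpable bit typically guards on Linux, where MADV_DONTDUMP excludes a range from core dumps and MADV_DODUMP re-includes it:

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void) {
	size_t len = 4096 * 4;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		return 1;
	}
	/* Mark the range non-dumpable, then dumpable again. */
	if (madvise(p, len, MADV_DONTDUMP) != 0 ||
	    madvise(p, len, MADV_DODUMP) != 0) {
		perror("madvise");
	}
	munmap(p, len);
	return 0;
}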

diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h
--- a/include/jemalloc/internal/extent_inlines.h
+++ b/include/jemalloc/internal/extent_inlines.h

@@ -93,6 +93,12 @@ extent_committed_get(const extent_t *extent) {
 	    EXTENT_BITS_COMMITTED_SHIFT);
 }
 
+static inline bool
+extent_dumpable_get(const extent_t *extent) {
+	return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
+	    EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
 static inline bool
 extent_slab_get(const extent_t *extent) {
 	return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
@@ -269,6 +275,12 @@ extent_committed_set(extent_t *extent, bool committed) {
 	    ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
 }
 
+static inline void
+extent_dumpable_set(extent_t *extent, bool dumpable) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
+	    ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
+}
+
 static inline void
 extent_slab_set(extent_t *extent, bool slab) {
 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
@@ -283,7 +295,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
 static inline void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
-    bool committed) {
+    bool committed, bool dumpable) {
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
 
 	extent_arena_set(extent, arena);
@@ -295,6 +307,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 	extent_state_set(extent, state);
 	extent_zeroed_set(extent, zeroed);
 	extent_committed_set(extent, committed);
+	extent_dumpable_set(extent, dumpable);
 	ql_elm_new(extent, ql_link);
 	if (config_prof) {
 		extent_prof_tctx_set(extent, NULL);
@@ -312,6 +325,7 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
 	extent_state_set(extent, extent_state_active);
 	extent_zeroed_set(extent, true);
 	extent_committed_set(extent, true);
+	extent_dumpable_set(extent, true);
 }
 
 static inline void
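The new accessors reuse the existing e_bits mask/shift pattern. Here is a self-contained illustration of that pattern, using mock names and an assumed MALLOCX_ARENA_BITS of 12 (the real value is build-dependent):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Standalone mock of the e_bits packing; 12 arena bits is an assumption. */
#define MALLOCX_ARENA_BITS		12
#define EXTENT_BITS_DUMPABLE_SHIFT	(MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_DUMPABLE_MASK \
	((uint64_t)0x1U << EXTENT_BITS_DUMPABLE_SHIFT)

typedef struct { uint64_t e_bits; } extent_mock_t;

static inline bool
extent_mock_dumpable_get(const extent_mock_t *extent) {
	return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
	    EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline void
extent_mock_dumpable_set(extent_mock_t *extent, bool dumpable) {
	/* Clear the one-bit field, then or in the new value. */
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
	    ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}

int main(void) {
	extent_mock_t extent = {0};
	extent_mock_dumpable_set(&extent, true);
	assert(extent_mock_dumpable_get(&extent));
	extent_mock_dumpable_set(&extent, false);
	assert(!extent_mock_dumpable_get(&extent));
	/* Bits outside the field are untouched by the set. */
	return 0;
}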

diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h

@@ -23,13 +23,14 @@ struct extent_s {
 	 * a: arena_ind
 	 * b: slab
 	 * c: committed
+	 * d: dumpable
 	 * z: zeroed
 	 * t: state
 	 * i: szind
 	 * f: nfree
 	 * n: sn
 	 *
-	 * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
+	 * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
 	 *
 	 * arena_ind: Arena from which this extent came, or all 1 bits if
 	 *	unassociated.
@@ -44,6 +45,23 @@ struct extent_s {
 	 *	as on a system that overcommits and satisfies physical
 	 *	memory needs on demand via soft page faults.
 	 *
+	 * dumpable: The dumpable flag indicates whether or not we've set the
+	 *	memory in question to be dumpable.  Note that this
+	 *	interacts somewhat subtly with user-specified extent hooks,
+	 *	since we don't know if *they* are fiddling with
+	 *	dumpability (in which case, we don't want to undo whatever
+	 *	they're doing).  To deal with this scenario, we:
+	 *	  - Make dumpable false only for memory allocated with the
+	 *	    default hooks.
+	 *	  - Only allow memory to go from non-dumpable to dumpable,
+	 *	    and only once.
+	 *	  - Never make the OS call to allow dumping when the
+	 *	    dumpable bit is already set.
+	 *	These three constraints mean that we will never
+	 *	accidentally dump user memory that the user meant to set
+	 *	nondumpable with their extent hooks.
+	 *
+	 *
 	 * zeroed: The zeroed flag is used by extent recycling code to track
 	 *	whether memory is zero-filled.
 	 *
@@ -80,25 +98,29 @@ struct extent_s {
 #define EXTENT_BITS_COMMITTED_MASK \
 	((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_ZEROED_SHIFT	(MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_DUMPABLE_SHIFT	(MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_DUMPABLE_MASK \
+	((uint64_t)0x1U << EXTENT_BITS_DUMPABLE_SHIFT)
+#define EXTENT_BITS_ZEROED_SHIFT	(MALLOCX_ARENA_BITS + 3)
 #define EXTENT_BITS_ZEROED_MASK \
 	((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_SHIFT		(MALLOCX_ARENA_BITS + 3)
+#define EXTENT_BITS_STATE_SHIFT		(MALLOCX_ARENA_BITS + 4)
 #define EXTENT_BITS_STATE_MASK \
 	((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_SHIFT		(MALLOCX_ARENA_BITS + 5)
+#define EXTENT_BITS_SZIND_SHIFT		(MALLOCX_ARENA_BITS + 6)
 #define EXTENT_BITS_SZIND_MASK \
 	(((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
 #define EXTENT_BITS_NFREE_SHIFT \
-	(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
+	(MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES)
 #define EXTENT_BITS_NFREE_MASK \
 	((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
 #define EXTENT_BITS_SN_SHIFT \
-	(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
+	(MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
 #define EXTENT_BITS_SN_MASK	(UINT64_MAX << EXTENT_BITS_SN_SHIFT)
 
 	/* Pointer to the extent that this structure is responsible for. */
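Every field above the new bit moves up by one position. The following standalone compile-time check mirrors the adjusted layout; the MALLOCX_ARENA_BITS, LG_CEIL_NSIZES, and LG_SLAB_MAXREGS values are assumptions chosen only to make the arithmetic concrete:

#include <stdint.h>

/* Assumed, build-dependent values. */
#define MALLOCX_ARENA_BITS	12
#define LG_CEIL_NSIZES		8
#define LG_SLAB_MAXREGS		9

#define EXTENT_BITS_DUMPABLE_SHIFT	(MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_ZEROED_SHIFT	(MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_STATE_SHIFT		(MALLOCX_ARENA_BITS + 4)
#define EXTENT_BITS_SZIND_SHIFT		(MALLOCX_ARENA_BITS + 6)
#define EXTENT_BITS_NFREE_SHIFT \
	(MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES)
#define EXTENT_BITS_SN_SHIFT \
	(MALLOCX_ARENA_BITS + 6 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))

/* dumpable(1), zeroed(1), state(2), szind, nfree, then sn in the rest. */
_Static_assert(EXTENT_BITS_ZEROED_SHIFT == EXTENT_BITS_DUMPABLE_SHIFT + 1,
    "zeroed follows the new dumpable bit");
_Static_assert(EXTENT_BITS_SZIND_SHIFT == EXTENT_BITS_STATE_SHIFT + 2,
    "state is two bits wide");
_Static_assert(EXTENT_BITS_NFREE_SHIFT == EXTENT_BITS_SZIND_SHIFT +
    LG_CEIL_NSIZES, "nfree follows szind");
_Static_assert(EXTENT_BITS_SN_SHIFT < 64, "sn still fits in e_bits");

int main(void) { return 0; }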
@@ -128,7 +150,7 @@ struct extent_s {
 	 */
 	ql_elm(extent_t) ql_link;
 
-	/* 
+	/*
 	 * Linkage for per size class sn/address-ordered heaps, and
 	 * for extent_avail
 	 */
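The three constraints documented in the hunk above amount to a one-way, at-most-once transition guarded by the bit. Here is a sketch of that protocol with hypothetical mock helpers (this commit adds only the bit, none of this logic):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical mock types and helpers; names are not from jemalloc. */
typedef struct { bool dumpable; } extent_mock_t;

static void
pages_dodump_mock(void *addr, size_t size) {
	/* Stand-in for the eventual OS call (e.g. madvise on Linux). */
	printf("make [%p, %zu) dumpable\n", addr, size);
}

/*
 * Per the comment's constraints: the OS call is issued only when the bit
 * is not yet set, so the transition happens at most once, and memory whose
 * dumpability a user's extent hooks control is never marked non-dumpable
 * in the first place, hence never touched here.
 */
static void
extent_make_dumpable(extent_mock_t *extent, void *addr, size_t size) {
	if (extent->dumpable) {
		return;	/* Never re-issue the OS call. */
	}
	pages_dodump_mock(addr, size);
	extent->dumpable = true;
}

int main(void) {
	extent_mock_t extent = {false};
	char buf[64];
	extent_make_dumpable(&extent, buf, sizeof(buf));	/* OS call. */
	extent_make_dumpable(&extent, buf, sizeof(buf));	/* No-op. */
	return 0;
}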

diff --git a/src/extent.c b/src/extent.c
--- a/src/extent.c
+++ b/src/extent.c

@@ -449,8 +449,10 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
-	    size, pad, alignment, slab, szind, zero, commit, false);
+	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
+	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
+	assert(extent == NULL || extent_dumpable_get(extent));
+	return extent;
 }
 
 void
@@ -458,6 +460,7 @@ extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
     extents_t *extents, extent_t *extent) {
 	assert(extent_base_get(extent) != NULL);
 	assert(extent_size_get(extent) != 0);
+	assert(extent_dumpable_get(extent));
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
@@ -1207,11 +1210,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
-	    committed);
+	    committed, true);
 	if (ptr == NULL) {
 		extent_dalloc(tsdn, arena, extent);
 		goto label_err;
 	}
+
 	if (extent_register_no_gdump_add(tsdn, extent)) {
 		extents_leak(tsdn, arena, r_extent_hooks,
 		    &arena->extents_retained, extent, true);
@@ -1374,7 +1378,8 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
 		return NULL;
 	}
 	extent_init(extent, arena, addr, esize, slab, szind,
-	    arena_extent_sn_next(arena), extent_state_active, zero, commit);
+	    arena_extent_sn_next(arena), extent_state_active, zero, commit,
+	    true);
 	if (pad != 0) {
 		extent_addr_randomize(tsdn, extent, alignment);
 	}
@@ -1412,6 +1417,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
 	}
 
+	assert(extent == NULL || extent_dumpable_get(extent));
 	return extent;
 }
@@ -1636,6 +1642,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
 void
 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *extent) {
+	assert(extent_dumpable_get(extent));
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
@@ -1926,7 +1933,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
 	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
 	    extent_state_get(extent), extent_zeroed_get(extent),
-	    extent_committed_get(extent));
+	    extent_committed_get(extent), extent_dumpable_get(extent));
 
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1937,7 +1944,7 @@
 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
 		    slab_a, szind_a, extent_sn_get(extent),
 		    extent_state_get(extent), extent_zeroed_get(extent),
-		    extent_committed_get(extent));
+		    extent_committed_get(extent), extent_dumpable_get(extent));
 
 		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
 		    true, &lead_elm_a, &lead_elm_b);

diff --git a/src/extent_dss.c b/src/extent_dss.c
--- a/src/extent_dss.c
+++ b/src/extent_dss.c

@@ -156,7 +156,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				extent_init(gap, arena, gap_addr_page,
 				    gap_size_page, false, NSIZES,
 				    arena_extent_sn_next(arena),
-				    extent_state_active, false, true);
+				    extent_state_active, false, true, true);
 			}
 			/*
 			 * Compute the address just past the end of the desired
@@ -199,7 +199,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 					extent_init(&extent, arena, ret, size,
 					    size, false, NSIZES,
-					    extent_state_active, false, true);
+					    extent_state_active, false, true,
+					    true);
 					if (extent_purge_forced_wrapper(tsdn,
 					    arena, &extent_hooks, &extent, 0,
 					    size)) {

diff --git a/test/unit/rtree.c b/test/unit/rtree.c
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c

@@ -87,9 +87,9 @@ TEST_BEGIN(test_rtree_extrema) {
 	extent_t extent_a, extent_b;
 	extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
 	    sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false,
-	    false);
+	    false, true);
 	extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false);
+	    extent_state_active, false, false, true);
 
 	tsdn_t *tsdn = tsdn_fetch();
@@ -126,7 +126,7 @@ TEST_BEGIN(test_rtree_bits) {
 	extent_t extent;
 	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false);
+	    extent_state_active, false, false, true);
 
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
@@ -167,7 +167,7 @@ TEST_BEGIN(test_rtree_random) {
 	extent_t extent;
 	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false);
+	    extent_state_active, false, false, true);
 
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");

diff --git a/test/unit/slab.c b/test/unit/slab.c
--- a/test/unit/slab.c
+++ b/test/unit/slab.c

@@ -9,7 +9,7 @@ TEST_BEGIN(test_arena_slab_regind) {
 	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
 	extent_init(&slab, NULL, mallocx(bin_info->slab_size,
 	    MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
-	    binind, 0, extent_state_active, false, true);
+	    binind, 0, extent_state_active, false, true, true);
 	assert_ptr_not_null(extent_addr_get(&slab),
 	    "Unexpected malloc() failure");
 	for (regind = 0; regind < bin_info->nregs; regind++) {