Track extent is_head state in rtree leaf.

Qi Wang, 2021-02-26 15:11:58 -08:00 (committed by Qi Wang)
parent 862219e461
commit 70d1541c5b
6 changed files with 53 additions and 20 deletions


@@ -23,7 +23,7 @@ typedef enum extent_state_e extent_state_t;
 enum extent_head_state_e {
 	EXTENT_NOT_HEAD,
-	EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
+	EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
 };
 typedef enum extent_head_state_e extent_head_state_t;


@@ -46,6 +46,7 @@ struct rtree_node_elm_s {
 typedef struct rtree_metadata_s rtree_metadata_t;
 struct rtree_metadata_s {
 	szind_t szind;
+	bool is_head; /* Mirrors edata->is_head. */
 	bool slab;
 };
@@ -65,9 +66,10 @@ struct rtree_leaf_elm_s {
 	 *
 	 * x: index
 	 * e: edata
+	 * h: is_head
 	 * b: slab
 	 *
-	 * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
+	 * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee00hb
 	 */
 	atomic_p_t le_bits;
 #else
@@ -184,12 +186,16 @@ rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
 	    & (((uintptr_t)1 << LG_VADDR) - 1);
 	uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
 	/*
-	 * Slab shares the low bit of edata; we know edata is on an even address
-	 * (in fact, it's 128 bytes on 64-bit systems; we can enforce this
-	 * alignment if we want to steal 6 extra rtree leaf bits someday.
+	 * Metadata shares the low bits of edata. edata is CACHELINE aligned (in
+	 * fact, it's 128 bytes on 64-bit systems); we can enforce this
+	 * alignment if we want to steal the extra rtree leaf bits someday.
 	 */
 	uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
-	return szind_bits | edata_bits | slab_bits;
+	uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
+	uintptr_t metadata_bits = szind_bits | is_head_bits | slab_bits;
+	assert((edata_bits & metadata_bits) == 0);
+	return edata_bits | metadata_bits;
 }
 
 JEMALLOC_ALWAYS_INLINE rtree_contents_t
@@ -198,20 +204,23 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) {
 	/* Do the easy things first. */
 	contents.metadata.szind = bits >> LG_VADDR;
 	contents.metadata.slab = (bool)(bits & 1);
+	contents.metadata.is_head = (bool)(bits & (1 << 1));
+	uintptr_t metadata_mask = ~((uintptr_t)((1 << 2) - 1));
 # ifdef __aarch64__
 	/*
 	 * aarch64 doesn't sign extend the highest virtual address bit to set
 	 * the higher ones. Instead, the high bits get zeroed.
 	 */
 	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
-	/* Mask off the slab bit. */
-	uintptr_t low_bit_mask = ~(uintptr_t)1;
+	/* Mask off metadata. */
+	uintptr_t low_bit_mask = metadata_mask;
 	uintptr_t mask = high_bit_mask & low_bit_mask;
 	contents.edata = (edata_t *)(bits & mask);
 # else
-	/* Restore sign-extended high bits, mask slab bit. */
+	/* Restore sign-extended high bits, mask metadata bits. */
 	contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
-	    >> RTREE_NHIB) & ~((uintptr_t)0x1));
+	    >> RTREE_NHIB) & metadata_mask);
 # endif
 	return contents;
 }
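As an aside, a minimal standalone sketch of the packing scheme the layout comment above describes, with hypothetical pack()/unpack() helpers (assuming a 64-bit system, LG_VADDR == 48, and edata pointers aligned to at least 4 bytes; the real decode also has to restore sign-extended high bits via RTREE_NHIB on non-aarch64, which this sketch skips). Illustrative only, not jemalloc code:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_LG_VADDR 48	/* Assumed significant virtual address bits. */

static uintptr_t
pack(const void *edata, unsigned szind, bool is_head, bool slab) {
	/* Low SKETCH_LG_VADDR bits hold the pointer; bits 0-1 must be free. */
	uintptr_t edata_bits = (uintptr_t)edata
	    & (((uintptr_t)1 << SKETCH_LG_VADDR) - 1);
	uintptr_t metadata_bits = ((uintptr_t)szind << SKETCH_LG_VADDR)
	    | ((uintptr_t)is_head << 1) | (uintptr_t)slab;
	assert((edata_bits & metadata_bits) == 0);
	return edata_bits | metadata_bits;
}

static void
unpack(uintptr_t bits, void **edata, unsigned *szind, bool *is_head,
    bool *slab) {
	*szind = (unsigned)(bits >> SKETCH_LG_VADDR);
	*slab = (bool)(bits & 1);
	*is_head = (bool)(bits & ((uintptr_t)1 << 1));
	/* Clear the szind bits and the two low flag bits to recover edata. */
	*edata = (void *)((bits & (((uintptr_t)1 << SKETCH_LG_VADDR) - 1))
	    & ~(uintptr_t)0x3);
}

The non-compact fallback (le_metadata, below) stores the same three fields in a plain unsigned instead, with szind now shifted left by 2 to make room for the is_head bit.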
@@ -230,7 +239,8 @@ rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
 	unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
 	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
 	contents.metadata.slab = (bool)(metadata_bits & 1);
-	contents.metadata.szind = (metadata_bits >> 1);
+	contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
+	contents.metadata.szind = (metadata_bits >> 2);
 
 	contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
 	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
@@ -247,7 +257,8 @@ rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
 	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
 #else
 	unsigned metadata_bits = ((unsigned)contents.metadata.slab
-	    | ((unsigned)contents.metadata.szind << 1));
+	    | ((unsigned)contents.metadata.is_head << 1)
+	    | ((unsigned)contents.metadata.szind << 2));
 	atomic_store_u(&elm->le_metadata, metadata_bits, ATOMIC_RELEASE);
 	/*
 	 * Write edata last, since the element is atomically considered valid
@@ -418,6 +429,7 @@ rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
 	contents.edata = NULL;
 	contents.metadata.szind = SC_NSIZES;
 	contents.metadata.slab = false;
+	contents.metadata.is_head = false;
 	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
 }


@@ -227,10 +227,14 @@ bool
 ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
 	tsdn_t *tsdn = tsdn_fetch();
 	edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global, addr_a);
 	bool head_a = edata_is_head_get(a);
 	edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global, addr_b);
 	bool head_b = edata_is_head_get(b);
+	emap_assert_mapped(tsdn, &arena_emap_global, a);
+	emap_assert_mapped(tsdn, &arena_emap_global, b);
 	return ehooks_default_merge_impl(tsdn, addr_a, head_a, addr_b, head_b);
 }
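For context on why the head state matters here, a rough sketch of the rule ehooks_default_merge_impl() applies, written as a hypothetical helper (assuming jemalloc's usual behavior: with non-coalescing mappings such as Windows VirtualAlloc regions, a merge is only acceptable under opt.retain and only within a single region, which is what is_head on the higher-addressed extent rules out). This is an illustration, not the actual implementation:

#include <stdbool.h>

/*
 * Returns true when the merge must be refused, matching the extent-hook
 * convention that true means failure.  head_b is the is_head state of the
 * extent at the higher address.
 */
static bool
head_forbids_merge(bool maps_coalesce, bool retain, bool head_b) {
	if (maps_coalesce) {
		/* e.g. mmap on Linux: adjacent mappings behave as one. */
		return false;
	}
	if (!retain) {
		/* Windows without opt.retain: never merge across regions. */
		return true;
	}
	/*
	 * Windows with opt.retain: only merge extents carved out of the same
	 * VirtualAlloc region, i.e. b must not start a new region.
	 */
	return head_b;
}

Mirroring is_head into the rtree leaf keeps that state available from a plain rtree lookup, and the emap_assert_mapped() calls added here and in extent_merge_impl() below check that the mirror stays consistent with the edata.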


@@ -141,6 +141,8 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
 	contents.edata = edata;
 	contents.metadata.szind = szind;
 	contents.metadata.slab = slab;
+	contents.metadata.is_head = (edata == NULL) ? false :
+	    edata_is_head_get(edata);
 	rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
 	if (elm_b != NULL) {
 		rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
@@ -169,12 +171,14 @@ emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
 	assert(edata_slab_get(edata));
 
+	rtree_contents_t contents;
+	contents.edata = edata;
+	contents.metadata.szind = szind;
+	contents.metadata.slab = true;
+	contents.metadata.is_head = false; /* Not allowed to access. */
 	/* Register interior. */
 	for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
-		rtree_contents_t contents;
-		contents.edata = edata;
-		contents.metadata.szind = szind;
-		contents.metadata.slab = true;
 		rtree_write(tsdn, &emap->rtree, rtree_ctx,
 		    (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
 		    LG_PAGE), contents);
@@ -214,6 +218,8 @@ emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
 	contents.edata = edata;
 	contents.metadata.szind = szind;
 	contents.metadata.slab = slab;
+	contents.metadata.is_head = edata_is_head_get(edata);
 	rtree_write(tsdn, &emap->rtree, rtree_ctx,
 	    (uintptr_t)edata_addr_get(edata), contents);
 	/*
@@ -297,6 +303,7 @@ emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
 	clear_contents.edata = NULL;
 	clear_contents.metadata.szind = SC_NSIZES;
 	clear_contents.metadata.slab = false;
+	clear_contents.metadata.is_head = false;
 
 	if (prepare->lead_elm_b != NULL) {
 		rtree_leaf_elm_write(tsdn, &emap->rtree,
@@ -320,8 +327,10 @@ void
 emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
 	EMAP_DECLARE_RTREE_CTX;
 
-	assert(rtree_read(tsdn, &emap->rtree, rtree_ctx,
-	    (uintptr_t)edata_base_get(edata)).edata == edata);
+	rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
+	    (uintptr_t)edata_base_get(edata));
+	assert(contents.edata == edata);
+	assert(contents.metadata.is_head == edata_is_head_get(edata));
 }
 
 void


@@ -1254,6 +1254,8 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
 	assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
 	assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
+	emap_assert_mapped(tsdn, pac->emap, a);
+	emap_assert_mapped(tsdn, pac->emap, b);
 
 	bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
 	    edata_size_get(a), edata_is_head_get(a), edata_base_get(b),


@@ -55,6 +55,7 @@ TEST_BEGIN(test_rtree_extrema) {
 	contents_a.edata = &edata_a;
 	contents_a.metadata.szind = edata_szind_get(&edata_a);
 	contents_a.metadata.slab = edata_slab_get(&edata_a);
+	contents_a.metadata.is_head = edata_is_head_get(&edata_a);
 	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
 	    "Unexpected rtree_write() failure");
 	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
@@ -63,20 +64,23 @@ TEST_BEGIN(test_rtree_extrema) {
 	    PAGE);
 	expect_true(contents_a.edata == read_contents_a.edata
 	    && contents_a.metadata.szind == read_contents_a.metadata.szind
-	    && contents_a.metadata.slab == read_contents_a.metadata.slab,
+	    && contents_a.metadata.slab == read_contents_a.metadata.slab
+	    && contents_a.metadata.is_head == read_contents_a.metadata.is_head,
 	    "rtree_read() should return previously set value");
 
 	rtree_contents_t contents_b;
 	contents_b.edata = &edata_b;
 	contents_b.metadata.szind = edata_szind_get_maybe_invalid(&edata_b);
 	contents_b.metadata.slab = edata_slab_get(&edata_b);
+	contents_b.metadata.is_head = edata_is_head_get(&edata_b);
 	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
 	    contents_b), "Unexpected rtree_write() failure");
 	rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
 	    ~((uintptr_t)0));
 	assert_true(contents_b.edata == read_contents_b.edata
 	    && contents_b.metadata.szind == read_contents_b.metadata.szind
-	    && contents_b.metadata.slab == read_contents_b.metadata.slab,
+	    && contents_b.metadata.slab == read_contents_b.metadata.slab
+	    && contents_b.metadata.is_head == read_contents_b.metadata.is_head,
 	    "rtree_read() should return previously set value");
 
 	base_delete(tsdn, base);
@@ -106,6 +110,7 @@ TEST_BEGIN(test_rtree_bits) {
 		contents.edata = &edata;
 		contents.metadata.szind = SC_NSIZES;
 		contents.metadata.slab = false;
+		contents.metadata.is_head = false;
 		expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
 		    contents), "Unexpected rtree_write() failure");
@@ -158,6 +163,7 @@ TEST_BEGIN(test_rtree_random) {
 		contents.edata = &edata;
 		contents.metadata.szind = SC_NSIZES;
 		contents.metadata.slab = false;
+		contents.metadata.is_head = false;
 		rtree_leaf_elm_write(tsdn, rtree, elm, contents);
 		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
 		    keys[i]).edata, &edata,