From 4d8c22f9a57fb29d39394e2382628854542d1520 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Fri, 26 Feb 2021 15:32:41 -0800 Subject: [PATCH] Store edata->state in rtree leaf and make edata_t 128B aligned. Verified that this doesn't result in any real increase of edata_t bytes allocated. --- include/jemalloc/internal/edata.h | 10 ++++- include/jemalloc/internal/emap.h | 30 +++++++++++++++ include/jemalloc/internal/rtree.h | 46 +++++++++++++++-------- src/base.c | 2 +- src/emap.c | 6 +++ src/extent.c | 36 ++++++++++-------- src/hpa_central.c | 7 ++-- test/unit/rtree.c | 61 +++++++++++++++++++------------ 8 files changed, 138 insertions(+), 60 deletions(-) diff --git a/include/jemalloc/internal/edata.h b/include/jemalloc/internal/edata.h index e75866ba..648b478e 100644 --- a/include/jemalloc/internal/edata.h +++ b/include/jemalloc/internal/edata.h @@ -13,6 +13,12 @@ #include "jemalloc/internal/sz.h" #include "jemalloc/internal/typed_list.h" +/* + * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment + * to free up the low bits in the rtree leaf. + */ +#define EDATA_ALIGNMENT 128 + enum extent_state_e { extent_state_active = 0, extent_state_dirty = 1, @@ -88,7 +94,7 @@ struct edata_s { * f: nfree * s: bin_shard * - * 00000000 ... 000000ss ssssffff ffffffii iiiiiitt zpcbaaaa aaaaaaaa + * 00000000 ... 00000sss sssfffff fffffiii iiiiittt zpcbaaaa aaaaaaaa * * arena_ind: Arena from which this extent came, or all 1 bits if * unassociated. @@ -143,7 +149,7 @@ struct edata_s { #define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT) #define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT) -#define EDATA_BITS_STATE_WIDTH 2 +#define EDATA_BITS_STATE_WIDTH 3 #define EDATA_BITS_STATE_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT) #define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT) diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h index ac0050b5..3e397483 100644 --- a/include/jemalloc/internal/emap.h +++ b/include/jemalloc/internal/emap.h @@ -136,6 +136,36 @@ emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { } } +static inline void +emap_update_rtree_at_addr(tsdn_t *tsdn, rtree_t *rtree, edata_t *expected_edata, + uintptr_t addr, extent_state_t state) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + addr, /* dependent */ true, /* init_missing */ false); + assert(elm != NULL); + rtree_contents_t contents = rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true); + assert(contents.edata == expected_edata); + contents.metadata.state = state; + rtree_leaf_elm_write(tsdn, rtree, elm, contents); +} + +static inline void +emap_edata_state_update(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_state_t state) { + /* Only emap is allowed to modify the edata internal state. 
*/ + edata_state_set(edata, state); + + emap_update_rtree_at_addr(tsdn, &emap->rtree, edata, + (uintptr_t)edata_base_get(edata), state); + emap_update_rtree_at_addr(tsdn, &emap->rtree, edata, + (uintptr_t)edata_last_get(edata), state); + + emap_assert_mapped(tsdn, emap, edata); +} + JEMALLOC_ALWAYS_INLINE edata_t * emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) { rtree_ctx_t rtree_ctx_fallback; diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h index 3b7972e4..89c08cb0 100644 --- a/include/jemalloc/internal/rtree.h +++ b/include/jemalloc/internal/rtree.h @@ -46,6 +46,7 @@ struct rtree_node_elm_s { typedef struct rtree_metadata_s rtree_metadata_t; struct rtree_metadata_s { szind_t szind; + extent_state_t state; /* Mirrors edata->state. */ bool is_head; /* Mirrors edata->is_head. */ bool slab; }; @@ -56,6 +57,10 @@ struct rtree_contents_s { rtree_metadata_t metadata; }; +#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH +#define RTREE_LEAF_STATE_SHIFT 2 +#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT) + struct rtree_leaf_elm_s { #ifdef RTREE_LEAF_COMPACT /* @@ -66,17 +71,17 @@ struct rtree_leaf_elm_s { * * x: index * e: edata + * s: state * h: is_head * b: slab * - * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee00hb + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb */ atomic_p_t le_bits; #else atomic_p_t le_edata; /* (edata_t *) */ /* - * slab is stored in the low bit; szind is stored in the next lowest - * bits. + * From low to high bits: slab, is_head, state. */ atomic_u_t le_metadata; #endif @@ -184,15 +189,14 @@ JEMALLOC_ALWAYS_INLINE uintptr_t rtree_leaf_elm_bits_encode(rtree_contents_t contents) { uintptr_t edata_bits = (uintptr_t)contents.edata & (((uintptr_t)1 << LG_VADDR) - 1); + uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR; - /* - * Metadata shares the low bits of edata. edata is CACHELINE aligned (in - * fact, it's 128 bytes on 64-bit systems); we can enforce this - * alignment if we want to steal the extra rtree leaf bits someday. - */ uintptr_t slab_bits = (uintptr_t)contents.metadata.slab; uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1; - uintptr_t metadata_bits = szind_bits | is_head_bits | slab_bits; + uintptr_t state_bits = (uintptr_t)contents.metadata.state << + RTREE_LEAF_STATE_SHIFT; + uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits | + slab_bits; assert((edata_bits & metadata_bits) == 0); return edata_bits | metadata_bits; @@ -206,7 +210,11 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) { contents.metadata.slab = (bool)(bits & 1); contents.metadata.is_head = (bool)(bits & (1 << 1)); - uintptr_t metadata_mask = ~((uintptr_t)((1 << 2) - 1)); + uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >> + RTREE_LEAF_STATE_SHIFT; + contents.metadata.state = (extent_state_t)state_bits; + + uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1); # ifdef __aarch64__ /* * aarch64 doesn't sign extend the highest virtual address bit to set @@ -214,13 +222,12 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) { */ uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; /* Mask off metadata. */ - uintptr_t low_bit_mask = metadata_mask; uintptr_t mask = high_bit_mask & low_bit_mask; contents.edata = (edata_t *)(bits & mask); # else /* Restore sign-extended high bits, mask metadata bits. 
*/ contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) - >> RTREE_NHIB) & metadata_mask); + >> RTREE_NHIB) & low_bit_mask); # endif return contents; } @@ -240,7 +247,12 @@ rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); contents.metadata.slab = (bool)(metadata_bits & 1); contents.metadata.is_head = (bool)(metadata_bits & (1 << 1)); - contents.metadata.szind = (metadata_bits >> 2); + + uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >> + RTREE_LEAF_STATE_SHIFT; + contents.metadata.state = (extent_state_t)state_bits; + contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT + + RTREE_LEAF_STATE_WIDTH); contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); @@ -252,13 +264,16 @@ rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, static inline void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, rtree_contents_t contents) { + assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0); #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_encode(contents); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else - unsigned metadata_bits = ((unsigned)contents.metadata.slab + unsigned metadata_bits = (unsigned)contents.metadata.slab | ((unsigned)contents.metadata.is_head << 1) - | ((unsigned)contents.metadata.szind << 2)); + | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT) + | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT + + RTREE_LEAF_STATE_WIDTH)); atomic_store_u(&elm->le_metadata, metadata_bits, ATOMIC_RELEASE); /* * Write edata last, since the element is atomically considered valid @@ -430,6 +445,7 @@ rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, contents.metadata.szind = SC_NSIZES; contents.metadata.slab = false; contents.metadata.is_head = false; + contents.metadata.state = (extent_state_t)0; rtree_leaf_elm_write(tsdn, rtree, elm, contents); } diff --git a/src/base.c b/src/base.c index 00440f4d..9d4ce5c5 100644 --- a/src/base.c +++ b/src/base.c @@ -472,7 +472,7 @@ edata_t * base_alloc_edata(tsdn_t *tsdn, base_t *base) { size_t esn; edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t), - CACHELINE, &esn); + EDATA_ALIGNMENT, &esn); if (edata == NULL) { return NULL; } diff --git a/src/emap.c b/src/emap.c index 62abf4d8..4f3915b5 100644 --- a/src/emap.c +++ b/src/emap.c @@ -143,6 +143,7 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a, contents.metadata.slab = slab; contents.metadata.is_head = (edata == NULL) ? false : edata_is_head_get(edata); + contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata); rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents); if (elm_b != NULL) { rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents); @@ -170,11 +171,13 @@ emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, EMAP_DECLARE_RTREE_CTX; assert(edata_slab_get(edata)); + assert(edata_state_get(edata) == extent_state_active); rtree_contents_t contents; contents.edata = edata; contents.metadata.szind = szind; contents.metadata.slab = true; + contents.metadata.state = extent_state_active; contents.metadata.is_head = false; /* Not allowed to access. */ /* Register interior. 
*/ @@ -219,6 +222,7 @@ emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, contents.metadata.szind = szind; contents.metadata.slab = slab; contents.metadata.is_head = edata_is_head_get(edata); + contents.metadata.state = edata_state_get(edata); rtree_write(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)edata_addr_get(edata), contents); @@ -304,6 +308,7 @@ emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, clear_contents.metadata.szind = SC_NSIZES; clear_contents.metadata.slab = false; clear_contents.metadata.is_head = false; + clear_contents.metadata.state = (extent_state_t)0; if (prepare->lead_elm_b != NULL) { rtree_leaf_elm_write(tsdn, &emap->rtree, @@ -331,6 +336,7 @@ emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { (uintptr_t)edata_base_get(edata)); assert(contents.edata == edata); assert(contents.metadata.is_head == edata_is_head_get(edata)); + assert(contents.metadata.state == edata_state_get(edata)); } void diff --git a/src/extent.c b/src/extent.c index a541e7bb..56ea33f6 100644 --- a/src/extent.c +++ b/src/extent.c @@ -64,11 +64,12 @@ extent_may_force_decay(pac_t *pac) { static bool extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata) { - edata_state_set(edata, extent_state_active); + emap_edata_state_update(tsdn, pac->emap, edata, extent_state_active); + bool coalesced; edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata, &coalesced, false); - edata_state_set(edata, ecache->state); + emap_edata_state_update(tsdn, pac->emap, edata, ecache->state); if (!coalesced) { return true; @@ -182,7 +183,8 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, not_reached(); case extent_state_dirty: case extent_state_muzzy: - edata_state_set(edata, extent_state_active); + emap_edata_state_update(tsdn, pac->emap, edata, + extent_state_active); break; case extent_state_retained: extent_deregister(tsdn, pac, edata); @@ -223,28 +225,30 @@ extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, } static void -extent_deactivate_locked(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) { +extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + edata_t *edata) { assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); assert(edata_state_get(edata) == extent_state_active); - edata_state_set(edata, ecache->state); + emap_edata_state_update(tsdn, pac->emap, edata, ecache->state); eset_insert(&ecache->eset, edata); } static void -extent_deactivate(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) { +extent_deactivate(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, edata_t *edata) { malloc_mutex_lock(tsdn, &ecache->mtx); - extent_deactivate_locked(tsdn, ecache, edata); + extent_deactivate_locked(tsdn, pac, ecache, edata); malloc_mutex_unlock(tsdn, &ecache->mtx); } static void -extent_activate_locked(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) { +extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + edata_t *edata) { assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); assert(edata_state_get(edata) == ecache->state); eset_remove(&ecache->eset, edata); - edata_state_set(edata, extent_state_active); + emap_edata_state_update(tsdn, pac->emap, edata, extent_state_active); } static void @@ -421,7 +425,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, return NULL; } - extent_activate_locked(tsdn, ecache, edata); + extent_activate_locked(tsdn, pac, ecache, edata); malloc_mutex_unlock(tsdn, &ecache->mtx); return 
edata; @@ -527,16 +531,16 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, * leaking the extent. */ assert(to_leak != NULL && lead == NULL && trail == NULL); - extent_deactivate(tsdn, ecache, to_leak); + extent_deactivate(tsdn, pac, ecache, to_leak); return NULL; } if (result == extent_split_interior_ok) { if (lead != NULL) { - extent_deactivate(tsdn, ecache, lead); + extent_deactivate(tsdn, pac, ecache, lead); } if (trail != NULL) { - extent_deactivate(tsdn, ecache, trail); + extent_deactivate(tsdn, pac, ecache, trail); } return edata; } else { @@ -837,7 +841,7 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *inner, edata_t *outer, bool forward, bool growing_retained) { assert(extent_can_coalesce(ecache, inner, outer)); - extent_activate_locked(tsdn, ecache, outer); + extent_activate_locked(tsdn, pac, ecache, outer); malloc_mutex_unlock(tsdn, &ecache->mtx); bool err = extent_merge_impl(tsdn, pac, ehooks, @@ -845,7 +849,7 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, malloc_mutex_lock(tsdn, &ecache->mtx); if (err) { - extent_deactivate_locked(tsdn, ecache, outer); + extent_deactivate_locked(tsdn, pac, ecache, outer); } return err; @@ -1008,7 +1012,7 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, return; } } - extent_deactivate_locked(tsdn, ecache, edata); + extent_deactivate_locked(tsdn, pac, ecache, edata); malloc_mutex_unlock(tsdn, &ecache->mtx); } diff --git a/src/hpa_central.c b/src/hpa_central.c index 346d9422..36758a03 100644 --- a/src/hpa_central.c +++ b/src/hpa_central.c @@ -91,7 +91,8 @@ label_success: */ assert(edata_state_get(edata) == extent_state_dirty); assert(edata_base_get(edata) == edata_addr_get(edata)); - edata_state_set(edata, extent_state_active); + emap_edata_state_update(tsdn, central->emap, edata, + extent_state_active); return edata; } @@ -136,7 +137,7 @@ hpa_central_alloc_grow(tsdn_t *tsdn, hpa_central_t *central, edata_sn_set(edata, sn); edata_sn_set(trail, sn); - edata_state_set(trail, extent_state_dirty); + emap_edata_state_update(tsdn, central->emap, trail, extent_state_dirty); eset_insert(¢ral->eset, trail); return false; } @@ -203,6 +204,6 @@ hpa_central_dalloc(tsdn_t *tsdn, hpa_central_t *central, edata_t *edata) { eset_remove(¢ral->eset, trail); hpa_central_dalloc_merge(tsdn, central, edata, trail); } - edata_state_set(edata, extent_state_dirty); + emap_edata_state_update(tsdn, central->emap, edata, extent_state_dirty); eset_insert(¢ral->eset, edata); } diff --git a/test/unit/rtree.c b/test/unit/rtree.c index a547f188..9251652c 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -32,12 +32,22 @@ TEST_END #undef NITERS #undef SEED +static edata_t * +alloc_edata(void) { + void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT)); + assert_ptr_not_null(ret, "Unexpected mallocx() failure"); + + return ret; +} + TEST_BEGIN(test_rtree_extrema) { - edata_t edata_a = {0}, edata_b = {0}; - edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS, + edata_t *edata_a, *edata_b; + edata_a = alloc_edata(); + edata_b = alloc_edata(); + edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS, false, sz_size2index(SC_LARGE_MINCLASS), 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); - edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, + edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); tsdn_t *tsdn = 
tsdn_fetch(); @@ -52,10 +62,11 @@ TEST_BEGIN(test_rtree_extrema) { "Unexpected rtree_new() failure"); rtree_contents_t contents_a; - contents_a.edata = &edata_a; - contents_a.metadata.szind = edata_szind_get(&edata_a); - contents_a.metadata.slab = edata_slab_get(&edata_a); - contents_a.metadata.is_head = edata_is_head_get(&edata_a); + contents_a.edata = edata_a; + contents_a.metadata.szind = edata_szind_get(edata_a); + contents_a.metadata.slab = edata_slab_get(edata_a); + contents_a.metadata.is_head = edata_is_head_get(edata_a); + contents_a.metadata.state = edata_state_get(edata_a); expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a), "Unexpected rtree_write() failure"); expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a), @@ -65,14 +76,16 @@ TEST_BEGIN(test_rtree_extrema) { expect_true(contents_a.edata == read_contents_a.edata && contents_a.metadata.szind == read_contents_a.metadata.szind && contents_a.metadata.slab == read_contents_a.metadata.slab - && contents_a.metadata.is_head == read_contents_a.metadata.is_head, + && contents_a.metadata.is_head == read_contents_a.metadata.is_head + && contents_a.metadata.state == read_contents_a.metadata.state, "rtree_read() should return previously set value"); rtree_contents_t contents_b; - contents_b.edata = &edata_b; - contents_b.metadata.szind = edata_szind_get_maybe_invalid(&edata_b); - contents_b.metadata.slab = edata_slab_get(&edata_b); - contents_b.metadata.is_head = edata_is_head_get(&edata_b); + contents_b.edata = edata_b; + contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b); + contents_b.metadata.slab = edata_slab_get(edata_b); + contents_b.metadata.is_head = edata_is_head_get(edata_b); + contents_b.metadata.state = edata_state_get(edata_b); expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), contents_b), "Unexpected rtree_write() failure"); rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx, @@ -80,7 +93,8 @@ TEST_BEGIN(test_rtree_extrema) { assert_true(contents_b.edata == read_contents_b.edata && contents_b.metadata.szind == read_contents_b.metadata.szind && contents_b.metadata.slab == read_contents_b.metadata.slab - && contents_b.metadata.is_head == read_contents_b.metadata.is_head, + && contents_b.metadata.is_head == read_contents_b.metadata.is_head + && contents_b.metadata.state == read_contents_b.metadata.state, "rtree_read() should return previously set value"); base_delete(tsdn, base); @@ -94,9 +108,8 @@ TEST_BEGIN(test_rtree_bits) { uintptr_t keys[] = {PAGE, PAGE + 1, PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; - - edata_t edata = {0}; - edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, + edata_t *edata_c = alloc_edata(); + edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); rtree_t *rtree = &test_rtree; @@ -107,16 +120,17 @@ TEST_BEGIN(test_rtree_bits) { for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { rtree_contents_t contents; - contents.edata = &edata; + contents.edata = edata_c; contents.metadata.szind = SC_NSIZES; contents.metadata.slab = false; contents.metadata.is_head = false; + contents.metadata.state = extent_state_active; expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], contents), "Unexpected rtree_write() failure"); for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[j]).edata, &edata, + keys[j]).edata, edata_c, "rtree_edata_read() should 
return previously set " "value and ignore insignificant key bits; i=%u, " "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, @@ -146,8 +160,8 @@ TEST_BEGIN(test_rtree_random) { rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); - edata_t edata = {0}; - edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, + edata_t *edata_d = alloc_edata(); + edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); expect_false(rtree_new(rtree, base, false), @@ -160,18 +174,19 @@ TEST_BEGIN(test_rtree_random) { expect_ptr_not_null(elm, "Unexpected rtree_leaf_elm_lookup() failure"); rtree_contents_t contents; - contents.edata = &edata; + contents.edata = edata_d; contents.metadata.szind = SC_NSIZES; contents.metadata.slab = false; contents.metadata.is_head = false; + contents.metadata.state = edata_state_get(edata_d); rtree_leaf_elm_write(tsdn, rtree, elm, contents); expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, &edata, + keys[i]).edata, edata_d, "rtree_edata_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, &edata, + keys[i]).edata, edata_d, "rtree_edata_read() should return previously set value, " "i=%u", i); }
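
Notes on the new leaf encoding (illustrative sketches; not part of the patch):

With RTREE_LEAF_COMPACT, a leaf packs everything into one word: szind sits
above LG_VADDR, and the low bits freed by the new 128-byte edata_t alignment
hold slab (bit 0), is_head (bit 1), and the now-3-bit state (bits 2..4).
Since lg(128) == 7, seven low pointer bits are zero, of which the layout
consumes five. Below is a minimal standalone round-trip of that layout,
assuming LG_VADDR == 48 and ignoring the aarch64 high-bit special case the
real decoder handles:

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LG_VADDR		48
#define EDATA_ALIGNMENT		128
#define RTREE_LEAF_STATE_SHIFT	2
#define RTREE_LEAF_STATE_WIDTH	3
#define MASK(width, shift)	(((((uintptr_t)1) << (width)) - 1) << (shift))
#define RTREE_LEAF_STATE_MASK	MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)

typedef struct {
	uintptr_t edata;	/* 128-byte-aligned pointer bits. */
	unsigned szind;
	unsigned state;		/* Widened to 3 bits by this patch. */
	bool is_head;
	bool slab;
} contents_t;

static uintptr_t
encode(contents_t c) {
	uintptr_t edata_bits = c.edata & (((uintptr_t)1 << LG_VADDR) - 1);
	uintptr_t szind_bits = (uintptr_t)c.szind << LG_VADDR;
	uintptr_t state_bits = (uintptr_t)c.state << RTREE_LEAF_STATE_SHIFT;
	uintptr_t metadata_bits = szind_bits | state_bits |
	    ((uintptr_t)c.is_head << 1) | (uintptr_t)c.slab;
	/* The alignment keeps pointer and metadata bits disjoint. */
	assert((edata_bits & metadata_bits) == 0);
	return edata_bits | metadata_bits;
}

static contents_t
decode(uintptr_t bits) {
	contents_t c;
	c.slab = (bool)(bits & 1);
	c.is_head = (bool)(bits & (1 << 1));
	c.state = (unsigned)((bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT);
	c.szind = (unsigned)(bits >> LG_VADDR);
	/* Drop the szind above LG_VADDR and the metadata below lg(128). */
	c.edata = bits & (((uintptr_t)1 << LG_VADDR) - 1) &
	    ~((uintptr_t)EDATA_ALIGNMENT - 1);
	return c;
}

int
main(void) {
	contents_t in = {(uintptr_t)0x7f0000400080, 41, 3, true, false};
	contents_t out = decode(encode(in));
	assert(out.edata == in.edata && out.szind == in.szind &&
	    out.state == in.state && out.is_head == in.is_head &&
	    out.slab == in.slab);
	printf("round-trip OK: %#" PRIxPTR "\n", encode(in));
	return 0;
}

The same widening is what pushes every field above state in edata_t's bit
diagram up by one position, and what grows EDATA_BITS_STATE_WIDTH from 2 to 3.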
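
emap_edata_state_update() keeps three copies of the state in sync: the edata
itself plus the rtree leaves for the extent's first and last addresses,
presumably so a lookup from either boundary observes the same state;
emap_do_assert_mapped() now asserts exactly this. A toy model of that
invariant follows, with an array standing in for the rtree; all names here
are hypothetical:

#include <assert.h>
#include <stddef.h>

typedef enum {
	state_active,
	state_dirty,
	state_muzzy,
	state_retained
} state_t;

#define NPAGES 16

/* Stand-in for the rtree: one leaf per page. */
static state_t leaf_state[NPAGES];

typedef struct {
	size_t first_page;
	size_t last_page;
	state_t state;
} edata_model_t;

static void
model_state_update(edata_model_t *e, state_t s) {
	e->state = s;			/* 1: the edata itself. */
	leaf_state[e->first_page] = s;	/* 2: leaf at edata_base_get(). */
	leaf_state[e->last_page] = s;	/* 3: leaf at edata_last_get(). */
}

static void
model_assert_mapped(const edata_model_t *e) {
	/* emap_do_assert_mapped()'s new state check, in miniature. */
	assert(leaf_state[e->first_page] == e->state);
	assert(leaf_state[e->last_page] == e->state);
}

int
main(void) {
	edata_model_t e = {3, 7, state_dirty};
	model_state_update(&e, state_dirty);
	/* E.g. what extent_activate_locked() now does via the emap. */
	model_state_update(&e, state_active);
	model_assert_mapped(&e);
	return 0;
}

This is also why extent.c and hpa_central.c route every former
edata_state_set() call through the emap, and why extent_activate_locked()
and extent_deactivate_locked() grow a pac_t argument: they need pac->emap
to reach the rtree.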
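
Finally, rtree_leaf_elm_write() now asserts that every stored edata is
EDATA_ALIGNMENT-aligned, which is why the tests trade their stack edata_t
objects for alloc_edata(): automatic storage only guarantees
_Alignof(edata_t), not 128 bytes. A standalone equivalent of the guarantee
the test gets from mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT)),
using C11 aligned_alloc so it runs without jemalloc:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define EDATA_ALIGNMENT 128	/* sizeof(edata_t) on 64-bit, per edata.h. */

int
main(void) {
	void *p = aligned_alloc(EDATA_ALIGNMENT, EDATA_ALIGNMENT);
	if (p == NULL) {
		return 1;
	}
	/* The low lg(128) == 7 bits are zero, ready for leaf metadata. */
	assert(((uintptr_t)p & (EDATA_ALIGNMENT - 1)) == 0);
	free(p);
	return 0;
}

base_alloc_edata() makes the matching change on the allocator side, passing
EDATA_ALIGNMENT instead of CACHELINE to base_alloc_impl().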