diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h
index dcac3b6a..628e393b 100644
--- a/include/jemalloc/internal/base.h
+++ b/include/jemalloc/internal/base.h
@@ -86,7 +86,8 @@ metadata_thp_enabled(void) {
 }
 
 base_t *b0get(void);
-base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+base_t *base_new(tsdn_t *tsdn, unsigned ind,
+    const extent_hooks_t *extent_hooks);
 void base_delete(tsdn_t *tsdn, base_t *base);
 ehooks_t *base_ehooks_get(base_t *base);
 extent_hooks_t *base_extent_hooks_set(base_t *base,
diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h
index b51a0c53..b9f6bc06 100644
--- a/include/jemalloc/internal/emap.h
+++ b/include/jemalloc/internal/emap.h
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_EMAP_H
 #define JEMALLOC_INTERNAL_EMAP_H
 
+#include "jemalloc/internal/base.h"
 #include "jemalloc/internal/mutex_pool.h"
 #include "jemalloc/internal/rtree.h"
 
@@ -27,7 +28,7 @@ struct emap_full_alloc_ctx_s {
 
 extern emap_t emap_global;
 
-bool emap_init(emap_t *emap, bool zeroed);
+bool emap_init(emap_t *emap, base_t *base, bool zeroed);
 
 /*
  * Grab the lock or locks associated with the edata or edatas indicated (which
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 11a52ed0..094cc1ad 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -78,6 +78,7 @@ struct rtree_level_s {
 
 typedef struct rtree_s rtree_t;
 struct rtree_s {
+	base_t *base;
 	malloc_mutex_t		init_lock;
 	/* Number of elements based on rtree_levels[0].bits. */
 #if RTREE_HEIGHT > 1
@@ -109,22 +110,8 @@ static const rtree_level_t rtree_levels[] = {
 #endif
 };
 
-bool rtree_new(rtree_t *rtree, bool zeroed);
+bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
 
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
-
-typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
-
-typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
-extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
-
-typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
-extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
-#ifdef JEMALLOC_JET
-void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
-#endif
 rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
     rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
 
diff --git a/src/base.c b/src/base.c
index 595b7710..ebb42da5 100644
--- a/src/base.c
+++ b/src/base.c
@@ -343,7 +343,7 @@ b0get(void) {
 }
 
 base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks) {
 	pszind_t pind_last = 0;
 	size_t extent_sn_next = 0;
 
@@ -353,7 +353,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * memory, and then initialize the ehooks within the base_t.
 	 */
 	ehooks_t fake_ehooks;
-	ehooks_init(&fake_ehooks, extent_hooks, ind);
+	ehooks_init(&fake_ehooks, (extent_hooks_t *)extent_hooks, ind);
 
 	base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
 	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
@@ -366,7 +366,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
 	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
 	    &gap_size, base_size, base_alignment);
-	ehooks_init(&base->ehooks, extent_hooks, ind);
+	ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
 	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
 	    malloc_mutex_rank_exclusive)) {
 		base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
diff --git a/src/emap.c b/src/emap.c
index 200a7828..723dfad2 100644
--- a/src/emap.c
+++ b/src/emap.c
@@ -22,9 +22,9 @@ enum emap_lock_result_e {
 typedef enum emap_lock_result_e emap_lock_result_t;
 
 bool
-emap_init(emap_t *emap, bool zeroed) {
+emap_init(emap_t *emap, base_t *base, bool zeroed) {
 	bool err;
-	err = rtree_new(&emap->rtree, zeroed);
+	err = rtree_new(&emap->rtree, base, zeroed);
 	if (err) {
 		return true;
 	}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index e2adffd2..6dc2e475 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1573,7 +1573,7 @@ malloc_init_hard_a0_locked() {
 		return true;
 	}
 	/* emap_global is static, hence zeroed. */
-	if (emap_init(&emap_global, /* zeroed */ true)) {
+	if (emap_init(&emap_global, b0get(), /* zeroed */ true)) {
 		return true;
 	}
 	if (extent_boot()) {
diff --git a/src/rtree.c b/src/rtree.c
index 4ae41fe2..07a4e9ac 100644
--- a/src/rtree.c
+++ b/src/rtree.c
@@ -10,7 +10,7 @@
  * used.
  */
 bool
-rtree_new(rtree_t *rtree, bool zeroed) {
+rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
 #ifdef JEMALLOC_JET
 	if (!zeroed) {
 		memset(rtree, 0, sizeof(rtree_t));	/* Clear root. */
@@ -18,6 +18,7 @@ rtree_new(rtree_t *rtree, bool zeroed) {
 #else
 	assert(zeroed);
 #endif
+	rtree->base = base;
 
 	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
 	    malloc_mutex_rank_exclusive)) {
@@ -28,75 +29,16 @@
 }
 
 static rtree_node_elm_t *
-rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
-	    sizeof(rtree_node_elm_t), CACHELINE);
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+	return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
+	    nelms * sizeof(rtree_node_elm_t), CACHELINE);
 }
-rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
-
-static void
-rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
-	/* Nodes are never deleted during normal operation. */
-	not_reached();
-}
-rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
-    rtree_node_dalloc_impl;
 
 static rtree_leaf_elm_t *
-rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
-	    sizeof(rtree_leaf_elm_t), CACHELINE);
+rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+	return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
+	    nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
 }
-rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
-
-static void
-rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
-	/* Leaves are never deleted during normal operation. */
-	not_reached();
-}
-rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
-    rtree_leaf_dalloc_impl;
-
-#ifdef JEMALLOC_JET
-#  if RTREE_HEIGHT > 1
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
-    unsigned level) {
-	size_t nchildren = ZU(1) << rtree_levels[level].bits;
-	if (level + 2 < RTREE_HEIGHT) {
-		for (size_t i = 0; i < nchildren; i++) {
-			rtree_node_elm_t *node =
-			    (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
-			    ATOMIC_RELAXED);
-			if (node != NULL) {
-				rtree_delete_subtree(tsdn, rtree, node, level +
-				    1);
-			}
-		}
-	} else {
-		for (size_t i = 0; i < nchildren; i++) {
-			rtree_leaf_elm_t *leaf =
-			    (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
-			    ATOMIC_RELAXED);
-			if (leaf != NULL) {
-				rtree_leaf_dalloc(tsdn, rtree, leaf);
-			}
-		}
-	}
-
-	if (subtree != rtree->root) {
-		rtree_node_dalloc(tsdn, rtree, subtree);
-	}
-}
-#  endif
-
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
-#  if RTREE_HEIGHT > 1
-	rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
-#  endif
-}
-#endif
 
 static rtree_node_elm_t *
 rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
index 2477db03..b5ece82c 100644
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c
@@ -4,80 +4,26 @@
 
 #define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
 
-rtree_node_alloc_t *rtree_node_alloc_orig;
-rtree_node_dalloc_t *rtree_node_dalloc_orig;
-rtree_leaf_alloc_t *rtree_leaf_alloc_orig;
-rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig;
-
 /* Potentially too large to safely place on the stack. */
 rtree_t test_rtree;
 
-static rtree_node_elm_t *
-rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	rtree_node_elm_t *node;
-
-	if (rtree != &test_rtree) {
-		return rtree_node_alloc_orig(tsdn, rtree, nelms);
-	}
-
-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
-	node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t));
-	assert_ptr_not_null(node, "Unexpected calloc() failure");
-	malloc_mutex_lock(tsdn, &rtree->init_lock);
-
-	return node;
-}
-
-static void
-rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_node_elm_t *node) {
-	if (rtree != &test_rtree) {
-		rtree_node_dalloc_orig(tsdn, rtree, node);
-		return;
-	}
-
-	free(node);
-}
-
-static rtree_leaf_elm_t *
-rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	rtree_leaf_elm_t *leaf;
-
-	if (rtree != &test_rtree) {
-		return rtree_leaf_alloc_orig(tsdn, rtree, nelms);
-	}
-
-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
-	leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t));
-	assert_ptr_not_null(leaf, "Unexpected calloc() failure");
-	malloc_mutex_lock(tsdn, &rtree->init_lock);
-
-	return leaf;
-}
-
-static void
-rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_leaf_elm_t *leaf) {
-	if (rtree != &test_rtree) {
-		rtree_leaf_dalloc_orig(tsdn, rtree, leaf);
-		return;
-	}
-
-	free(leaf);
-}
-
 TEST_BEGIN(test_rtree_read_empty) {
 	tsdn_t *tsdn;
 
 	tsdn = tsdn_fetch();
 
+	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
+	assert_ptr_not_null(base, "Unexpected base_new failure");
+
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
-	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+	assert_false(rtree_new(rtree, base, false),
+	    "Unexpected rtree_new() failure");
 	assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
 	    false), "rtree_edata_read() should return NULL for empty tree");
-	rtree_delete(tsdn, rtree);
+
+	base_delete(tsdn, base);
 }
 TEST_END
 
@@ -95,10 +41,14 @@ TEST_BEGIN(test_rtree_extrema) {
 
 	tsdn_t *tsdn = tsdn_fetch();
 
+	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
+	assert_ptr_not_null(base, "Unexpected base_new failure");
+
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
-	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+	assert_false(rtree_new(rtree, base, false),
+	    "Unexpected rtree_new() failure");
 
 	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
 	    edata_szind_get(&edata_a), edata_slab_get(&edata_a)),
@@ -116,12 +66,14 @@ TEST_BEGIN(test_rtree_extrema) {
 	    ~((uintptr_t)0), true), &edata_b,
 	    "rtree_edata_read() should return previously set value");
 
-	rtree_delete(tsdn, rtree);
+	base_delete(tsdn, base);
 }
 TEST_END
 
 TEST_BEGIN(test_rtree_bits) {
 	tsdn_t *tsdn = tsdn_fetch();
+	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
+	assert_ptr_not_null(base, "Unexpected base_new failure");
 
 	uintptr_t keys[] = {PAGE, PAGE + 1,
 	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
@@ -133,7 +85,8 @@ TEST_BEGIN(test_rtree_bits) {
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
-	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+	assert_false(rtree_new(rtree, base, false),
+	    "Unexpected rtree_new() failure");
 
 	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
 		assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
@@ -153,7 +106,7 @@ TEST_BEGIN(test_rtree_bits) {
 		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
 	}
 
-	rtree_delete(tsdn, rtree);
+	base_delete(tsdn, base);
 }
 TEST_END
 
@@ -162,6 +115,10 @@ TEST_BEGIN(test_rtree_random) {
 #define SEED 42
 	sfmt_t *sfmt = init_gen_rand(SEED);
 	tsdn_t *tsdn = tsdn_fetch();
+
+	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks);
+	assert_ptr_not_null(base, "Unexpected base_new failure");
+
 	uintptr_t keys[NSET];
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
@@ -171,7 +128,8 @@ TEST_BEGIN(test_rtree_random) {
 	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
-	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+	assert_false(rtree_new(rtree, base, false),
+	    "Unexpected rtree_new() failure");
 
 	for (unsigned i = 0; i < NSET; i++) {
 		keys[i] = (uintptr_t)gen_rand64(sfmt);
@@ -204,7 +162,7 @@ TEST_BEGIN(test_rtree_random) {
 		    "rtree_edata_read() should return previously set value");
 	}
 
-	rtree_delete(tsdn, rtree);
+	base_delete(tsdn, base);
 	fini_gen_rand(sfmt);
 #undef NSET
 #undef SEED
@@ -213,15 +171,6 @@ TEST_END
 
 int
 main(void) {
-	rtree_node_alloc_orig = rtree_node_alloc;
-	rtree_node_alloc = rtree_node_alloc_intercept;
-	rtree_node_dalloc_orig = rtree_node_dalloc;
-	rtree_node_dalloc = rtree_node_dalloc_intercept;
-	rtree_leaf_alloc_orig = rtree_leaf_alloc;
-	rtree_leaf_alloc = rtree_leaf_alloc_intercept;
-	rtree_leaf_dalloc_orig = rtree_leaf_dalloc;
-	rtree_leaf_dalloc = rtree_leaf_dalloc_intercept;
-
 	return test(
 	    test_rtree_read_empty,
 	    test_rtree_extrema,
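
For reference, the caller-side shape this patch imposes is sketched below. This is a minimal sketch, not part of the patch: sketch_rtree and sketch_rtree_lifecycle are hypothetical names, and a JEMALLOC_JET test build is assumed so that rtree_new() accepts an unzeroed rtree, as in the updated unit test. The point is that rtree node/leaf memory now comes from a caller-supplied base_t rather than being hard-wired to b0get(), so teardown is a single base_delete() instead of the removed rtree_delete() walk:

/* Sketch only (hypothetical names); mirrors test/unit/rtree.c above. */
static rtree_t sketch_rtree;	/* Potentially too large for the stack. */

static void
sketch_rtree_lifecycle(void) {
	tsdn_t *tsdn = tsdn_fetch();

	/* Every node/leaf the rtree allocates will come from this base. */
	base_t *base = base_new(tsdn, /* ind */ 0,
	    &ehooks_default_extent_hooks);
	assert_ptr_not_null(base, "Unexpected base_new failure");

	/* zeroed == false is permitted under JEMALLOC_JET; rtree_new() clears. */
	assert_false(rtree_new(&sketch_rtree, base, /* zeroed */ false),
	    "Unexpected rtree_new() failure");

	/* ... rtree_write() / rtree_edata_read() against &sketch_rtree ... */

	/*
	 * Deleting the base releases the rtree's entire backing store in one
	 * shot, which is what lets the patch drop rtree_delete() and the
	 * JET_MUTABLE alloc/dalloc intercepts.
	 */
	base_delete(tsdn, base);
}

The same ownership story explains the emap change: emap_init() forwards whichever base_t it is given, so malloc_init_hard_a0_locked() now passes b0get() explicitly to keep the global emap on its old b0 backing, while tests (or other callers) can supply a private base.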