Rtree: take the base allocator as a parameter.
This facilitates better testing by avoiding mixing of the "real" base with the base used by the rtree under test.
commit a0c1f4ac57
parent 7013716aaa
committed by David Goldblatt
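As an illustration of the testing benefit described above, here is a hedged sketch of how a unit test might now pair an rtree with its own private base allocator. This code is not part of the commit; test_rtree_with_private_base, its hooks parameter, and the test/jemalloc_test.h harness header are illustrative assumptions. Only base_new, base_delete, and the new rtree_new(rtree, base, zeroed) signature come from the diff below.

/*
 * Hedged sketch, not part of this commit: a test can hand the rtree its
 * own base_t, so the rtree's node/leaf allocations no longer land in the
 * global b0get() base.
 */
#include "test/jemalloc_test.h"	/* assumed unit-test harness header */

/* Potentially too large to place on the stack, so file scope (zeroed). */
static rtree_t test_rtree;

static bool
test_rtree_with_private_base(tsdn_t *tsdn, const extent_hooks_t *hooks) {
	/* A private base allocator used only by the rtree under test. */
	base_t *test_base = base_new(tsdn, /* ind */ 0, hooks);
	if (test_base == NULL) {
		return true;
	}
	/* Static storage is already zeroed, so zeroed = true is accurate. */
	if (rtree_new(&test_rtree, test_base, /* zeroed */ true)) {
		base_delete(tsdn, test_base);
		return true;
	}
	/* ... exercise the rtree; its internal allocations hit test_base ... */
	base_delete(tsdn, test_base);
	return false;
}

Previously rtree_node_alloc_impl() and rtree_leaf_alloc_impl() always allocated from b0get(), so a test rtree's nodes ended up in the global base; with rtree->base they go to whatever base the caller supplied.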
@@ -343,7 +343,7 @@ b0get(void) {
 }
 
 base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks) {
 	pszind_t pind_last = 0;
 	size_t extent_sn_next = 0;
 
@@ -353,7 +353,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * memory, and then initialize the ehooks within the base_t.
 	 */
 	ehooks_t fake_ehooks;
-	ehooks_init(&fake_ehooks, extent_hooks, ind);
+	ehooks_init(&fake_ehooks, (extent_hooks_t *)extent_hooks, ind);
 
 	base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
 	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
@@ -366,7 +366,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
 	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
 	    &gap_size, base_size, base_alignment);
-	ehooks_init(&base->ehooks, extent_hooks, ind);
+	ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
 	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
 	    malloc_mutex_rank_exclusive)) {
 		base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
@@ -22,9 +22,9 @@ enum emap_lock_result_e {
 typedef enum emap_lock_result_e emap_lock_result_t;
 
 bool
-emap_init(emap_t *emap, bool zeroed) {
+emap_init(emap_t *emap, base_t *base, bool zeroed) {
 	bool err;
-	err = rtree_new(&emap->rtree, zeroed);
+	err = rtree_new(&emap->rtree, base, zeroed);
 	if (err) {
 		return true;
 	}
@@ -1573,7 +1573,7 @@ malloc_init_hard_a0_locked() {
 		return true;
 	}
 	/* emap_global is static, hence zeroed. */
-	if (emap_init(&emap_global, /* zeroed */ true)) {
+	if (emap_init(&emap_global, b0get(), /* zeroed */ true)) {
 		return true;
 	}
 	if (extent_boot()) {
src/rtree.c (74 changed lines)
@@ -10,7 +10,7 @@
  * used.
  */
 bool
-rtree_new(rtree_t *rtree, bool zeroed) {
+rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
 #ifdef JEMALLOC_JET
 	if (!zeroed) {
 		memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
@@ -18,6 +18,7 @@ rtree_new(rtree_t *rtree, bool zeroed) {
 #else
 	assert(zeroed);
 #endif
+	rtree->base = base;
 
 	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
 	    malloc_mutex_rank_exclusive)) {
@@ -28,75 +29,16 @@ rtree_new(rtree_t *rtree, bool zeroed) {
 }
 
 static rtree_node_elm_t *
-rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
-	    sizeof(rtree_node_elm_t), CACHELINE);
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+	return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
+	    nelms * sizeof(rtree_node_elm_t), CACHELINE);
 }
-rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
-
-static void
-rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
-	/* Nodes are never deleted during normal operation. */
-	not_reached();
-}
-rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
-    rtree_node_dalloc_impl;
 
 static rtree_leaf_elm_t *
-rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
-	    sizeof(rtree_leaf_elm_t), CACHELINE);
+rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+	return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
+	    nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
 }
-rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
-
-static void
-rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
-	/* Leaves are never deleted during normal operation. */
-	not_reached();
-}
-rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
-    rtree_leaf_dalloc_impl;
-
-#ifdef JEMALLOC_JET
-#  if RTREE_HEIGHT > 1
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
-    unsigned level) {
-	size_t nchildren = ZU(1) << rtree_levels[level].bits;
-	if (level + 2 < RTREE_HEIGHT) {
-		for (size_t i = 0; i < nchildren; i++) {
-			rtree_node_elm_t *node =
-			    (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
-			    ATOMIC_RELAXED);
-			if (node != NULL) {
-				rtree_delete_subtree(tsdn, rtree, node, level +
-				    1);
-			}
-		}
-	} else {
-		for (size_t i = 0; i < nchildren; i++) {
-			rtree_leaf_elm_t *leaf =
-			    (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
-			    ATOMIC_RELAXED);
-			if (leaf != NULL) {
-				rtree_leaf_dalloc(tsdn, rtree, leaf);
-			}
-		}
-	}
-
-	if (subtree != rtree->root) {
-		rtree_node_dalloc(tsdn, rtree, subtree);
-	}
-}
-#  endif
-
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
-#  if RTREE_HEIGHT > 1
-	rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
-#  endif
-}
-#endif
 
 static rtree_node_elm_t *
 rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,