Split rtree_elm_t into rtree_{node,leaf}_elm_t.
This allows leaf elements to differ in size from internal node elements. In principle it would be more correct to use a different type for each level of the tree, but due to implementation details related to atomic operations, we use casts anyway, thus counteracting the value of additional type correctness. Furthermore, such a scheme would require function code generation (via cpp macros), as well as either unwieldy type names for leaves or type aliases, e.g.

    typedef struct rtree_elm_d2_s rtree_leaf_elm_t;

This alternate strategy would be more correct, and with less code duplication, but probably not worth the complexity.
This commit is contained in:
@@ -2,19 +2,21 @@
 rtree_node_alloc_t *rtree_node_alloc_orig;
 rtree_node_dalloc_t *rtree_node_dalloc_orig;
+rtree_leaf_alloc_t *rtree_leaf_alloc_orig;
+rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig;
 
 rtree_t *test_rtree;
 
-static rtree_elm_t *
+static rtree_node_elm_t *
 rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
-	rtree_elm_t *node;
+	rtree_node_elm_t *node;
 
 	if (rtree != test_rtree) {
 		return rtree_node_alloc_orig(tsdn, rtree, nelms);
 	}
 
 	malloc_mutex_unlock(tsdn, &rtree->init_lock);
-	node = (rtree_elm_t *)calloc(nelms, sizeof(rtree_elm_t));
+	node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t));
 	assert_ptr_not_null(node, "Unexpected calloc() failure");
 	malloc_mutex_lock(tsdn, &rtree->init_lock);
@@ -22,7 +24,8 @@ rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
 }
 
 static void
-rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) {
+rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_node_elm_t *node) {
 	if (rtree != test_rtree) {
 		rtree_node_dalloc_orig(tsdn, rtree, node);
 		return;
@@ -31,6 +34,33 @@ rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) {
 	free(node);
 }
 
+static rtree_leaf_elm_t *
+rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+	rtree_leaf_elm_t *leaf;
+
+	if (rtree != test_rtree) {
+		return rtree_leaf_alloc_orig(tsdn, rtree, nelms);
+	}
+
+	malloc_mutex_unlock(tsdn, &rtree->init_lock);
+	leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t));
+	assert_ptr_not_null(leaf, "Unexpected calloc() failure");
+	malloc_mutex_lock(tsdn, &rtree->init_lock);
+
+	return leaf;
+}
+
+static void
+rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_leaf_elm_t *leaf) {
+	if (rtree != test_rtree) {
+		rtree_leaf_dalloc_orig(tsdn, rtree, leaf);
+		return;
+	}
+
+	free(leaf);
+}
+
 TEST_BEGIN(test_rtree_read_empty) {
 	tsdn_t *tsdn;
 
@@ -75,22 +105,20 @@ thd_start(void *varg) {
 		uintptr_t key = (uintptr_t)(gen_rand64(sfmt) & ((ZU(1) <<
 		    MAX_NBITS) - ZU(1)));
 		if (i % 2 == 0) {
-			rtree_elm_t *elm;
-
-			elm = rtree_elm_acquire(tsdn, &arg->rtree, &rtree_ctx,
-			    key, false, true);
+			rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn,
+			    &arg->rtree, &rtree_ctx, key, false, true);
 			assert_ptr_not_null(elm,
-			    "Unexpected rtree_elm_acquire() failure");
-			rtree_elm_write_acquired(tsdn, &arg->rtree, elm,
+			    "Unexpected rtree_leaf_elm_acquire() failure");
+			rtree_leaf_elm_write_acquired(tsdn, &arg->rtree, elm,
 			    extent);
-			rtree_elm_release(tsdn, &arg->rtree, elm);
+			rtree_leaf_elm_release(tsdn, &arg->rtree, elm);
 
-			elm = rtree_elm_acquire(tsdn, &arg->rtree, &rtree_ctx,
-			    key, true, false);
+			elm = rtree_leaf_elm_acquire(tsdn, &arg->rtree,
+			    &rtree_ctx, key, true, false);
 			assert_ptr_not_null(elm,
-			    "Unexpected rtree_elm_acquire() failure");
-			rtree_elm_read_acquired(tsdn, &arg->rtree, elm);
-			rtree_elm_release(tsdn, &arg->rtree, elm);
+			    "Unexpected rtree_leaf_elm_acquire() failure");
+			rtree_leaf_elm_read_acquired(tsdn, &arg->rtree, elm);
+			rtree_leaf_elm_release(tsdn, &arg->rtree, elm);
 		} else {
 			rtree_read(tsdn, &arg->rtree, &rtree_ctx, key, false);
 		}
@@ -201,19 +229,18 @@ TEST_BEGIN(test_rtree_random) {
 	extent_t extent;
 	rtree_t rtree;
 	rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-	rtree_elm_t *elm;
 
 	test_rtree = &rtree;
 	assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
 
 	for (unsigned i = 0; i < NSET; i++) {
 		keys[i] = (uintptr_t)gen_rand64(sfmt);
-		elm = rtree_elm_acquire(tsdn, &rtree, &rtree_ctx, keys[i],
-		    false, true);
+		rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn, &rtree,
+		    &rtree_ctx, keys[i], false, true);
 		assert_ptr_not_null(elm,
-		    "Unexpected rtree_elm_acquire() failure");
-		rtree_elm_write_acquired(tsdn, &rtree, elm, &extent);
-		rtree_elm_release(tsdn, &rtree, elm);
+		    "Unexpected rtree_leaf_elm_acquire() failure");
+		rtree_leaf_elm_write_acquired(tsdn, &rtree, elm, &extent);
+		rtree_leaf_elm_release(tsdn, &rtree, elm);
 		assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i],
 		    true), &extent,
 		    "rtree_read() should return previously set value");
@@ -248,6 +275,10 @@ main(void) {
 	rtree_node_alloc = rtree_node_alloc_intercept;
 	rtree_node_dalloc_orig = rtree_node_dalloc;
 	rtree_node_dalloc = rtree_node_dalloc_intercept;
+	rtree_leaf_alloc_orig = rtree_leaf_alloc;
+	rtree_leaf_alloc = rtree_leaf_alloc_intercept;
+	rtree_leaf_dalloc_orig = rtree_leaf_dalloc;
+	rtree_leaf_dalloc = rtree_leaf_dalloc_intercept;
 	test_rtree = NULL;
 
 	return test(
Reference in New Issue
Block a user