Remove rtree support for 0 (NULL) keys.
NULL can never actually be inserted in practice, and removing support allows a branch to be removed from the fast path.
parent f5cf9b19c8
commit 650c070e10
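What the change buys on the lookup fast path, as a minimal sketch with hypothetical stand-in names (cache_elm_t and leafkey_of() are invented here; this is not jemalloc's actual code): previously every lookup paid a key != 0 branch so that NULL keys were tolerated; now a nonzero key is a caller-side precondition, and the branch is replaced by a debug-only assert.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t leafkey;
	void *leaf;
} cache_elm_t;

/* Stand-in for rtree_leafkey(): mask off the low (subkey) bits. */
static inline uintptr_t
leafkey_of(uintptr_t key) {
	return key & ~(uintptr_t)0xfff;
}

/* Before: every call branches on key before the MRU cache probe. */
static inline void *
lookup_before(const cache_elm_t *mru, uintptr_t key) {
	if (key != 0) {
		if (mru->leafkey == leafkey_of(key)) {
			return mru->leaf;
		}
	}
	return NULL;	/* Miss / slow path elided. */
}

/*
 * After: callers guarantee key != 0 (extent addresses are never NULL),
 * so the guard is checked only in debug builds.
 */
static inline void *
lookup_after(const cache_elm_t *mru, uintptr_t key) {
	assert(key != 0);
	if (mru->leafkey == leafkey_of(key)) {
		return mru->leaf;
	}
	return NULL;	/* Miss / slow path elided. */
}

In release builds the assert compiles away, which is exactly the fast-path branch removal the commit message describes.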
@@ -79,43 +79,41 @@ rtree_elm_write(rtree_elm_t *elm, const extent_t *extent) {
 JEMALLOC_ALWAYS_INLINE rtree_elm_t *
 rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key, bool dependent, bool init_missing) {
+	assert(key != 0);
 	assert(!dependent || !init_missing);
 
-	if (likely(key != 0)) {
-		uintptr_t leafkey = rtree_leafkey(key);
+	uintptr_t leafkey = rtree_leafkey(key);
 #define RTREE_CACHE_CHECK(i) do {					\
-		if (likely(rtree_ctx->cache[i].leafkey == leafkey)) {	\
-			rtree_elm_t *leaf = rtree_ctx->cache[i].leaf;	\
-			if (likely(leaf != NULL)) {			\
-				/* Reorder. */				\
-				memmove(&rtree_ctx->cache[1],		\
-				    &rtree_ctx->cache[0],		\
-				    sizeof(rtree_ctx_cache_elm_t) * i);	\
-				rtree_ctx->cache[0].leafkey = leafkey;	\
-				rtree_ctx->cache[0].leaf = leaf;	\
-									\
-				uintptr_t subkey = rtree_subkey(key,	\
-				    RTREE_HEIGHT-1);			\
-				return &leaf[subkey];			\
-			}						\
-		}							\
-	} while (0)
-		/* Check the MRU cache entry. */
-		RTREE_CACHE_CHECK(0);
-		/*
-		 * Search the remaining cache elements, and on success move the
-		 * matching element to the front.  Unroll the first iteration to
-		 * avoid calling memmove() (the compiler typically optimizes it
-		 * into raw moves).
-		 */
-		if (RTREE_CTX_NCACHE > 1) {
-			RTREE_CACHE_CHECK(1);
-		}
-		for (unsigned i = 2; i < RTREE_CTX_NCACHE; i++) {
-			RTREE_CACHE_CHECK(i);
-		}
-#undef RTREE_CACHE_CHECK
-	}
+	if (likely(rtree_ctx->cache[i].leafkey == leafkey)) {		\
+		rtree_elm_t *leaf = rtree_ctx->cache[i].leaf;		\
+		if (likely(leaf != NULL)) {				\
+			/* Reorder. */					\
+			memmove(&rtree_ctx->cache[1],			\
+			    &rtree_ctx->cache[0],			\
+			    sizeof(rtree_ctx_cache_elm_t) * i);		\
+			rtree_ctx->cache[0].leafkey = leafkey;		\
+			rtree_ctx->cache[0].leaf = leaf;		\
+									\
+			uintptr_t subkey = rtree_subkey(key,		\
+			    RTREE_HEIGHT-1);				\
+			return &leaf[subkey];				\
+		}							\
+	}								\
+} while (0)
+	/* Check the MRU cache entry. */
+	RTREE_CACHE_CHECK(0);
+	/*
+	 * Search the remaining cache elements, and on success move the matching
+	 * element to the front.  Unroll the first iteration to avoid calling
+	 * memmove() (the compiler typically optimizes it into raw moves).
+	 */
+	if (RTREE_CTX_NCACHE > 1) {
+		RTREE_CACHE_CHECK(1);
+	}
+	for (unsigned i = 2; i < RTREE_CTX_NCACHE; i++) {
+		RTREE_CACHE_CHECK(i);
+	}
+#undef RTREE_CACHE_CHECK
 
 	return rtree_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent,
 	    init_missing);
src/rtree.c
@@ -170,17 +170,15 @@ rtree_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
 		 * node is a leaf, so it contains values rather than	\
 		 * child pointers.					\
 		 */							\
-		if (likely(key != 0)) {					\
-			if (RTREE_CTX_NCACHE > 1) {			\
-				memmove(&rtree_ctx->cache[1],		\
-				    &rtree_ctx->cache[0],		\
-				    sizeof(rtree_ctx_cache_elm_t) *	\
-				    (RTREE_CTX_NCACHE-1));		\
-			}						\
-			uintptr_t leafkey = rtree_leafkey(key);		\
-			rtree_ctx->cache[0].leafkey = leafkey;		\
-			rtree_ctx->cache[0].leaf = node;		\
+		if (RTREE_CTX_NCACHE > 1) {				\
+			memmove(&rtree_ctx->cache[1],			\
+			    &rtree_ctx->cache[0],			\
+			    sizeof(rtree_ctx_cache_elm_t) *		\
+			    (RTREE_CTX_NCACHE-1));			\
 		}							\
+		uintptr_t leafkey = rtree_leafkey(key);			\
+		rtree_ctx->cache[0].leafkey = leafkey;			\
+		rtree_ctx->cache[0].leaf = node;			\
 		uintptr_t subkey = rtree_subkey(key, level);		\
 		return &node[subkey];					\
 	}
@@ -40,7 +40,7 @@ TEST_BEGIN(test_rtree_read_empty) {
 	rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
 	test_rtree = &rtree;
 	assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
-	assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, 0, false),
+	assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, false),
 	    "rtree_read() should return NULL for empty tree");
 	rtree_delete(tsdn, &rtree);
 	test_rtree = NULL;
@@ -139,9 +139,10 @@ TEST_BEGIN(test_rtree_extrema) {
 	test_rtree = &rtree;
 	assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
 
-	assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, 0, &extent_a),
+	assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, PAGE, &extent_a),
 	    "Unexpected rtree_write() failure");
-	assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, 0, true), &extent_a,
+	assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, true),
+	    &extent_a,
 	    "rtree_read() should return previously set value");
 
 	assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0),
@@ -158,7 +159,8 @@ TEST_END
 TEST_BEGIN(test_rtree_bits) {
 	tsdn_t *tsdn = tsdn_fetch();
 
-	uintptr_t keys[] = {0, 1, (((uintptr_t)1) << LG_PAGE) - 1};
+	uintptr_t keys[] = {PAGE, PAGE + 1,
+	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
 
 	extent_t extent;
 	rtree_t rtree;
@@ -180,7 +182,7 @@ TEST_BEGIN(test_rtree_bits) {
 			    "key=%#"FMTxPTR, i, j, keys[i], keys[j]);
 		}
 		assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
-		    (((uintptr_t)1) << LG_PAGE), false),
+		    (((uintptr_t)2) << LG_PAGE), false),
 		    "Only leftmost rtree leaf should be set; i=%u", i);
 		rtree_clear(tsdn, &rtree, &rtree_ctx, keys[i]);
 	}
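Why the tests above switch their keys from 0 to PAGE: with NULL-key support gone, 0 is no longer a legal probe key, so the smallest key worth exercising is one page. A hedged aside, assuming jemalloc's usual definition of PAGE as the page size (the LG_PAGE value below is illustrative only, not the real platform-dependent one):

#include <stdint.h>

#define LG_PAGE	12	/* assumed for illustration; real value varies by platform */
#define PAGE	((uintptr_t)1 << LG_PAGE)

/* Old extreme keys were {0, 1, PAGE - 1}; the new ones shift up by a page. */
uintptr_t keys[] = {PAGE, PAGE + 1, PAGE + PAGE - 1};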