Remove rtree support for 0 (NULL) keys.

NULL can never actually be inserted in practice, and removing support
allows a branch to be removed from the fast path.
Jason Evans 2017-02-05 02:50:59 -08:00
parent f5cf9b19c8
commit 650c070e10
3 changed files with 43 additions and 45 deletions
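
In other words, the key == 0 case moves from a runtime branch to a debug-only precondition. A minimal sketch of the before/after shape of such a fast path (illustrative only; the toy table and function names below are not jemalloc's rtree code):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative sketch only: a toy direct-mapped table, not jemalloc's
 * rtree.  The point is the shape of the lookup fast path.
 */
#define TABLE_SLOTS 256
static void *table[TABLE_SLOTS];

/* Before: every lookup branches on key != 0, even though 0 is never stored. */
static void *
lookup_old(uintptr_t key) {
	if (key != 0) {
		return table[key % TABLE_SLOTS];
	}
	return NULL;
}

/* After: the precondition becomes a debug-only assertion, and the key check
 * disappears from the fast path entirely. */
static void *
lookup_new(uintptr_t key) {
	assert(key != 0);
	return table[key % TABLE_SLOTS];
}
```

Since assert() compiles away under NDEBUG, release builds pay nothing for the documented precondition.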

File 1 of 3

@@ -79,9 +79,9 @@ rtree_elm_write(rtree_elm_t *elm, const extent_t *extent) {
 JEMALLOC_ALWAYS_INLINE rtree_elm_t *
 rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     uintptr_t key, bool dependent, bool init_missing) {
+    assert(key != 0);
     assert(!dependent || !init_missing);
-    if (likely(key != 0)) {
     uintptr_t leafkey = rtree_leafkey(key);
 #define RTREE_CACHE_CHECK(i) do { \
     if (likely(rtree_ctx->cache[i].leafkey == leafkey)) { \
@@ -103,10 +103,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
     /* Check the MRU cache entry. */
     RTREE_CACHE_CHECK(0);
     /*
-     * Search the remaining cache elements, and on success move the
-     * matching element to the front.  Unroll the first iteration to
-     * avoid calling memmove() (the compiler typically optimizes it
-     * into raw moves).
+     * Search the remaining cache elements, and on success move the matching
+     * element to the front.  Unroll the first iteration to avoid calling
+     * memmove() (the compiler typically optimizes it into raw moves).
     */
     if (RTREE_CTX_NCACHE > 1) {
         RTREE_CACHE_CHECK(1);
@@ -115,7 +114,6 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
             RTREE_CACHE_CHECK(i);
         }
 #undef RTREE_CACHE_CHECK
-    }
     return rtree_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent,
         init_missing);
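
The comment rewrapped in the hunk above describes the cache policy this fast path relies on: probe the most-recently-used slot first, then scan the remaining slots, and on a hit shift the earlier entries down one position and install the hit at the front, with the first iteration unrolled so the constant-size move compiles to raw moves rather than a memmove() call. A generic sketch of that move-to-front pattern (type, field, and function names here are illustrative, not jemalloc's rtree_ctx API):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Generic move-to-front cache sketch; not jemalloc's actual rtree_ctx. */
#define NCACHE 8

typedef struct {
	uintptr_t leafkey;
	void *leaf;
} cache_entry_t;

static void *
cache_lookup(cache_entry_t *cache, uintptr_t leafkey) {
	/* MRU slot: a hit here moves no data. */
	if (cache[0].leafkey == leafkey) {
		return cache[0].leaf;
	}
	/* Slot 1 unrolled: the one-entry shift is just a swap, so the
	 * compiler emits raw moves instead of a memmove() call. */
	if (NCACHE > 1 && cache[1].leafkey == leafkey) {
		cache_entry_t hit = cache[1];
		cache[1] = cache[0];
		cache[0] = hit;
		return cache[0].leaf;
	}
	/* Remaining slots: shift entries [0..i-1] down one position and
	 * install the hit at the front. */
	for (size_t i = 2; i < NCACHE; i++) {
		if (cache[i].leafkey == leafkey) {
			cache_entry_t hit = cache[i];
			memmove(&cache[1], &cache[0],
			    i * sizeof(cache_entry_t));
			cache[0] = hit;
			return cache[0].leaf;
		}
	}
	return NULL;	/* miss: fall through to the slow path */
}
```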

File 2 of 3

@@ -170,7 +170,6 @@ rtree_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
         * node is a leaf, so it contains values rather than \
         * child pointers. \
         */ \
-        if (likely(key != 0)) { \
         if (RTREE_CTX_NCACHE > 1) { \
             memmove(&rtree_ctx->cache[1], \
                 &rtree_ctx->cache[0], \
@@ -180,7 +179,6 @@ rtree_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
         uintptr_t leafkey = rtree_leafkey(key); \
         rtree_ctx->cache[0].leafkey = leafkey; \
         rtree_ctx->cache[0].leaf = node; \
-        } \
         uintptr_t subkey = rtree_subkey(key, level); \
         return &node[subkey]; \
     }

File 3 of 3

@@ -40,7 +40,7 @@ TEST_BEGIN(test_rtree_read_empty) {
     rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
     test_rtree = &rtree;
     assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
-    assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, 0, false),
+    assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, false),
         "rtree_read() should return NULL for empty tree");
     rtree_delete(tsdn, &rtree);
     test_rtree = NULL;
@@ -139,9 +139,10 @@ TEST_BEGIN(test_rtree_extrema) {
     test_rtree = &rtree;
     assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
-    assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, 0, &extent_a),
+    assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, PAGE, &extent_a),
         "Unexpected rtree_write() failure");
-    assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, 0, true), &extent_a,
+    assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, true),
+        &extent_a,
         "rtree_read() should return previously set value");
     assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0),
@@ -158,7 +159,8 @@ TEST_END
 TEST_BEGIN(test_rtree_bits) {
     tsdn_t *tsdn = tsdn_fetch();
-    uintptr_t keys[] = {0, 1, (((uintptr_t)1) << LG_PAGE) - 1};
+    uintptr_t keys[] = {PAGE, PAGE + 1,
+        PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
     extent_t extent;
     rtree_t rtree;
@@ -180,7 +182,7 @@ TEST_BEGIN(test_rtree_bits) {
                 "key=%#"FMTxPTR, i, j, keys[i], keys[j]);
         }
         assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
-            (((uintptr_t)1) << LG_PAGE), false),
+            (((uintptr_t)2) << LG_PAGE), false),
             "Only leftmost rtree leaf should be set; i=%u", i);
         rtree_clear(tsdn, &rtree, &rtree_ctx, keys[i]);
     }
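
The test changes above all follow from the new precondition: 0 is no longer a legal key, so PAGE stands in as the smallest key the tests exercise and the derived keys shift by one page. A summary, with stand-in page constants (4 KiB assumed here for illustration; jemalloc defines LG_PAGE and PAGE itself):

```c
#include <stdint.h>

/* Stand-ins for jemalloc's page-size constants; 4 KiB pages assumed. */
#define LG_PAGE	12
#define PAGE	(((uintptr_t)1) << LG_PAGE)

/* test_rtree_bits key set, before and after this commit: 0 is no longer a
 * legal key, so PAGE becomes the smallest key exercised and the whole range
 * shifts up by one page. */
static const uintptr_t keys_before[] = {0, 1, (((uintptr_t)1) << LG_PAGE) - 1};
static const uintptr_t keys_after[]  = {PAGE, PAGE + 1,
    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};

/* The "only leftmost leaf set" probe moves accordingly, from 1 << LG_PAGE to
 * 2 << LG_PAGE, the first page past the new key range. */
```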