diff --git a/configure.ac b/configure.ac index 7530eff7..e71edd72 100644 --- a/configure.ac +++ b/configure.ac @@ -406,6 +406,74 @@ case "${host_cpu}" in esac AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) +case "${host_cpu}" in + aarch64) + AC_MSG_CHECKING([number of significant virtual address bits]) + LG_VADDR=48 + AC_MSG_RESULT([$LG_VADDR]) + ;; + x86_64) + AC_CACHE_CHECK([number of significant virtual address bits], + [je_cv_lg_vaddr], + AC_RUN_IFELSE([AC_LANG_PROGRAM( +[[ +#include <stdio.h> +#ifdef _WIN32 +#include <limits.h> +#include <intrin.h> +typedef unsigned __int32 uint32_t; +#else +#include <stdint.h> +#endif +]], [[ + uint32_t r[[4]]; + uint32_t eax_in = 0x80000008U; +#ifdef _WIN32 + __cpuid((int *)r, (int)eax_in); +#else + asm volatile ("cpuid" + : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]]) + : "a" (eax_in), "c" (0) + ); +#endif + uint32_t eax_out = r[[0]]; + uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); + FILE *f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + fprintf(f, "%u", vaddr); + fclose(f); + return 0; +]])], + [je_cv_lg_vaddr=`cat conftest.out`], + [je_cv_lg_vaddr=error], + [je_cv_lg_vaddr=57])) + if test "x${je_cv_lg_vaddr}" != "x" ; then + LG_VADDR="${je_cv_lg_vaddr}" + fi + if test "x${LG_VADDR}" != "xerror" ; then + AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + else + AC_MSG_ERROR([cannot determine number of significant virtual address bits]) + fi + ;; + *) + AC_MSG_CHECKING([number of significant virtual address bits]) + if test "x${LG_SIZEOF_PTR}" = "x3" ; then + LG_VADDR=64 + elif test "x${LG_SIZEOF_PTR}" = "x2" ; then + LG_VADDR=32 + elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then + LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" + else + AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}]) + fi + AC_MSG_RESULT([$LG_VADDR]) + ;; +esac +AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in 
b/include/jemalloc/internal/jemalloc_internal_defs.h.in index 396a1a27..6c70e167 100644 --- a/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -22,6 +22,13 @@ */ #undef CPU_SPINWAIT +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#undef LG_VADDR + /* Defined if C11 atomics are available. */ #undef JEMALLOC_C11ATOMICS diff --git a/include/jemalloc/internal/rtree_externs.h b/include/jemalloc/internal/rtree_externs.h index f4f2feb5..fa53580a 100644 --- a/include/jemalloc/internal/rtree_externs.h +++ b/include/jemalloc/internal/rtree_externs.h @@ -1,7 +1,29 @@ #ifndef JEMALLOC_INTERNAL_RTREE_EXTERNS_H #define JEMALLOC_INTERNAL_RTREE_EXTERNS_H -bool rtree_new(rtree_t *rtree, unsigned bits); +/* + * Split the bits into one to three partitions depending on number of + * significant bits. If the number of bits does not divide evenly into the + * number of levels, place one remainder bit per level starting at the leaf + * level. 
+ */ +static const rtree_level_t rtree_levels[] = { +#if RTREE_NSB <= 10 + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} +#elif RTREE_NSB <= 36 + {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, + {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} +#elif RTREE_NSB <= 52 + {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, + {RTREE_NSB/3 + RTREE_NSB%3/2, + RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, + {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} +#else +# error Unsupported number of significant virtual address bits +#endif +}; + +bool rtree_new(rtree_t *rtree); #ifdef JEMALLOC_JET typedef rtree_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); extern rtree_node_alloc_t *rtree_node_alloc; diff --git a/include/jemalloc/internal/rtree_inlines.h b/include/jemalloc/internal/rtree_inlines.h index 86aa8cd1..4b848541 100644 --- a/include/jemalloc/internal/rtree_inlines.h +++ b/include/jemalloc/internal/rtree_inlines.h @@ -2,8 +2,8 @@ #define JEMALLOC_INTERNAL_RTREE_INLINES_H #ifndef JEMALLOC_ENABLE_INLINE -uintptr_t rtree_leafkey(rtree_t *rtree, uintptr_t key); -uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); +uintptr_t rtree_leafkey(uintptr_t key); +uintptr_t rtree_subkey(uintptr_t key, unsigned level); extent_t *rtree_elm_read(rtree_elm_t *elm, bool dependent); void rtree_elm_write(rtree_elm_t *elm, const extent_t *extent); rtree_elm_t *rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, @@ -25,21 +25,21 @@ void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_leafkey(rtree_t *rtree, uintptr_t key) { +rtree_leafkey(uintptr_t key) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); - unsigned cumbits = (rtree->levels[rtree->height-1].cumbits - - rtree->levels[rtree->height-1].bits); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); unsigned maskbits = ptrbits - cumbits; uintptr_t 
mask = ~((ZU(1) << maskbits) - 1); return (key & mask); } JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) { +rtree_subkey(uintptr_t key, unsigned level) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); - unsigned cumbits = rtree->levels[level].cumbits; + unsigned cumbits = rtree_levels[level].cumbits; unsigned shiftbits = ptrbits - cumbits; - unsigned maskbits = rtree->levels[level].bits; + unsigned maskbits = rtree_levels[level].bits; unsigned mask = (ZU(1) << maskbits) - 1; return ((key >> shiftbits) & mask); } @@ -82,7 +82,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, assert(!dependent || !init_missing); if (likely(key != 0)) { - uintptr_t leafkey = rtree_leafkey(rtree, key); + uintptr_t leafkey = rtree_leafkey(key); #define RTREE_CACHE_CHECK(i) do { \ if (likely(rtree_ctx->cache[i].leafkey == leafkey)) { \ rtree_elm_t *leaf = rtree_ctx->cache[i].leaf; \ @@ -94,8 +94,8 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, rtree_ctx->cache[0].leafkey = leafkey; \ rtree_ctx->cache[0].leaf = leaf; \ \ - uintptr_t subkey = rtree_subkey(rtree, \ - key, rtree->height-1); \ + uintptr_t subkey = rtree_subkey(key, \ + RTREE_HEIGHT-1); \ return &leaf[subkey]; \ } \ } \ diff --git a/include/jemalloc/internal/rtree_structs.h b/include/jemalloc/internal/rtree_structs.h index 713d3000..312171e3 100644 --- a/include/jemalloc/internal/rtree_structs.h +++ b/include/jemalloc/internal/rtree_structs.h @@ -41,12 +41,10 @@ struct rtree_ctx_s { }; struct rtree_s { - unsigned height; union { void *root_pun; rtree_elm_t *root; }; - rtree_level_t levels[RTREE_HEIGHT_MAX]; malloc_mutex_t init_lock; }; diff --git a/include/jemalloc/internal/rtree_types.h b/include/jemalloc/internal/rtree_types.h index b4ab018d..a654698b 100644 --- a/include/jemalloc/internal/rtree_types.h +++ b/include/jemalloc/internal/rtree_types.h @@ -16,15 +16,14 @@ typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; 
typedef struct rtree_ctx_s rtree_ctx_t; typedef struct rtree_s rtree_t; -/* - * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the - * machine address width. - */ -#define LG_RTREE_BITS_PER_LEVEL 4 -#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) -/* Maximum rtree height. */ -#define RTREE_HEIGHT_MAX \ - ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) +/* Number of high insignificant bits. */ +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +/* Number of low insignificant bits. */ +#define RTREE_NLIB LG_PAGE +/* Number of significant bits. */ +#define RTREE_NSB (LG_VADDR - RTREE_NLIB) +/* Number of levels in radix tree. */ +#define RTREE_HEIGHT (sizeof(rtree_levels)/sizeof(rtree_level_t)) /* * Number of leafkey/leaf pairs to cache. Each entry supports an entire leaf, diff --git a/src/extent.c b/src/extent.c index 4a83f694..85c92d0f 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1522,8 +1522,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, bool extent_boot(void) { - if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - - LG_PAGE))) { + if (rtree_new(&extents_rtree)) { return true; } diff --git a/src/rtree.c b/src/rtree.c index fb52cf68..83929ba6 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -6,46 +6,11 @@ * used. */ bool -rtree_new(rtree_t *rtree, unsigned bits) { - unsigned bits_in_leaf, height, i; - - assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / - RTREE_BITS_PER_LEVEL)); - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? 
RTREE_BITS_PER_LEVEL - : (bits % RTREE_BITS_PER_LEVEL); - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; - if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) { - height++; - } - } else { - height = 1; - } - assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); - - rtree->height = height; - +rtree_new(rtree_t *rtree) { rtree->root_pun = NULL; - - /* Root level. */ - rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : - bits_in_leaf; - rtree->levels[0].cumbits = rtree->levels[0].bits; - /* Interior levels. */ - for (i = 1; i < height-1; i++) { - rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; - rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + - RTREE_BITS_PER_LEVEL; + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE)) { + return true; } - /* Leaf level. */ - if (height > 1) { - rtree->levels[height-1].bits = bits_in_leaf; - rtree->levels[height-1].cumbits = bits; - } - - malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE); return false; } @@ -84,10 +49,10 @@ rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl); static void rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node, unsigned level) { - if (level + 1 < rtree->height) { + if (level + 1 < RTREE_HEIGHT) { size_t nchildren, i; - nchildren = ZU(1) << rtree->levels[level].bits; + nchildren = ZU(1) << rtree_levels[level].bits; for (i = 0; i < nchildren; i++) { rtree_elm_t *child = node[i].child; if (child != NULL) { @@ -116,7 +81,7 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, node = atomic_read_p((void**)elmp); if (node == NULL) { node = rtree_node_alloc(tsdn, rtree, ZU(1) << - rtree->levels[level].bits); + rtree_levels[level].bits); if (node == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; @@ -186,24 +151,18 @@ rtree_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, rtree_elm_t *node = init_missing ? 
rtree_subtree_read(tsdn, rtree, dependent) : rtree_subtree_tryread(rtree, dependent); -#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) - switch (RTREE_GET_BIAS) { -#define RTREE_GET_SUBTREE(level) \ - case level: { \ - assert(level < (RTREE_HEIGHT_MAX-1)); \ +#define RTREE_GET_SUBTREE(level) { \ + assert(level < RTREE_HEIGHT-1); \ if (!dependent && unlikely(!rtree_node_valid(node))) { \ return NULL; \ } \ - uintptr_t subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ + uintptr_t subkey = rtree_subkey(key, level); \ node = init_missing ? rtree_child_read(tsdn, rtree, \ - &node[subkey], level - RTREE_GET_BIAS, dependent) : \ + &node[subkey], level, dependent) : \ rtree_child_tryread(&node[subkey], dependent); \ - /* Fall through. */ \ } -#define RTREE_GET_LEAF(level) \ - case level: { \ - assert(level == (RTREE_HEIGHT_MAX-1)); \ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ if (!dependent && unlikely(!rtree_node_valid(node))) { \ return NULL; \ } \ @@ -218,68 +177,27 @@ rtree_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, sizeof(rtree_ctx_cache_elm_t) * \ (RTREE_CTX_NCACHE-1)); \ } \ - uintptr_t leafkey = rtree_leafkey(rtree, key); \ + uintptr_t leafkey = rtree_leafkey(key); \ rtree_ctx->cache[0].leafkey = leafkey; \ rtree_ctx->cache[0].leaf = node; \ } \ - uintptr_t subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ + uintptr_t subkey = rtree_subkey(key, level); \ return &node[subkey]; \ } -#if RTREE_HEIGHT_MAX > 1 - RTREE_GET_SUBTREE(0) -#endif -#if RTREE_HEIGHT_MAX > 2 - RTREE_GET_SUBTREE(1) -#endif -#if RTREE_HEIGHT_MAX > 3 - RTREE_GET_SUBTREE(2) -#endif -#if RTREE_HEIGHT_MAX > 4 - RTREE_GET_SUBTREE(3) -#endif -#if RTREE_HEIGHT_MAX > 5 - RTREE_GET_SUBTREE(4) -#endif -#if RTREE_HEIGHT_MAX > 6 - RTREE_GET_SUBTREE(5) -#endif -#if RTREE_HEIGHT_MAX > 7 - RTREE_GET_SUBTREE(6) -#endif -#if RTREE_HEIGHT_MAX > 8 - RTREE_GET_SUBTREE(7) -#endif -#if RTREE_HEIGHT_MAX > 9 - 
RTREE_GET_SUBTREE(8) -#endif -#if RTREE_HEIGHT_MAX > 10 - RTREE_GET_SUBTREE(9) -#endif -#if RTREE_HEIGHT_MAX > 11 - RTREE_GET_SUBTREE(10) -#endif -#if RTREE_HEIGHT_MAX > 12 - RTREE_GET_SUBTREE(11) -#endif -#if RTREE_HEIGHT_MAX > 13 - RTREE_GET_SUBTREE(12) -#endif -#if RTREE_HEIGHT_MAX > 14 - RTREE_GET_SUBTREE(13) -#endif -#if RTREE_HEIGHT_MAX > 15 - RTREE_GET_SUBTREE(14) -#endif -#if RTREE_HEIGHT_MAX > 16 -# error Unsupported RTREE_HEIGHT_MAX -#endif - RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) + if (RTREE_HEIGHT > 1) { + RTREE_GET_SUBTREE(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_SUBTREE(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_SUBTREE(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) #undef RTREE_GET_SUBTREE #undef RTREE_GET_LEAF - default: not_reached(); - } -#undef RTREE_GET_BIAS not_reached(); } @@ -351,7 +269,7 @@ rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, witness_init(&rew->witness, "rtree_elm", WITNESS_RANK_RTREE_ELM, rtree_elm_witness_comp, NULL); - return; + return; } } not_reached(); diff --git a/test/unit/rtree.c b/test/unit/rtree.c index d40e6490..2088595b 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -33,31 +33,26 @@ rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) { TEST_BEGIN(test_rtree_read_empty) { tsdn_t *tsdn; - unsigned i; tsdn = tsdn_fetch(); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; - test_rtree = &rtree; - assert_false(rtree_new(&rtree, i), - "Unexpected rtree_new() failure"); - assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, 0, false), - "rtree_read() should return NULL for empty tree"); - rtree_delete(tsdn, &rtree); - test_rtree = NULL; - } + rtree_t rtree; + rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; + test_rtree = &rtree; + assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, 0, false), + 
"rtree_read() should return NULL for empty tree"); + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } TEST_END #define NTHREADS 8 -#define MAX_NBITS 18 +#define MAX_NBITS 30 #define NITERS 1000 #define SEED 42 typedef struct { - unsigned nbits; rtree_t rtree; uint32_t seed; } thd_start_arg_t; @@ -77,7 +72,8 @@ thd_start(void *varg) { tsdn = tsdn_fetch(); for (i = 0; i < NITERS; i++) { - uintptr_t key = (uintptr_t)gen_rand64(sfmt); + uintptr_t key = (uintptr_t)(gen_rand64(sfmt) & ((ZU(1) << + MAX_NBITS) - ZU(1))); if (i % 2 == 0) { rtree_elm_t *elm; @@ -110,165 +106,134 @@ TEST_BEGIN(test_rtree_concurrent) { thd_t thds[NTHREADS]; sfmt_t *sfmt; tsdn_t *tsdn; - unsigned i, j; sfmt = init_gen_rand(SEED); tsdn = tsdn_fetch(); - for (i = 1; i < MAX_NBITS; i++) { - arg.nbits = i; - test_rtree = &arg.rtree; - assert_false(rtree_new(&arg.rtree, arg.nbits), - "Unexpected rtree_new() failure"); - arg.seed = gen_rand32(sfmt); - for (j = 0; j < NTHREADS; j++) { - thd_create(&thds[j], thd_start, (void *)&arg); - } - for (j = 0; j < NTHREADS; j++) { - thd_join(thds[j], NULL); - } - rtree_delete(tsdn, &arg.rtree); - test_rtree = NULL; + test_rtree = &arg.rtree; + assert_false(rtree_new(&arg.rtree), "Unexpected rtree_new() failure"); + arg.seed = gen_rand32(sfmt); + for (unsigned i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, (void *)&arg); } + for (unsigned i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } + rtree_delete(tsdn, &arg.rtree); + test_rtree = NULL; fini_gen_rand(sfmt); } TEST_END #undef NTHREADS -#undef MAX_NBITS #undef NITERS #undef SEED TEST_BEGIN(test_rtree_extrema) { - unsigned i; extent_t extent_a, extent_b; tsdn_t *tsdn; tsdn = tsdn_fetch(); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; - test_rtree = &rtree; - assert_false(rtree_new(&rtree, i), - "Unexpected rtree_new() failure"); + rtree_t rtree; + rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; + test_rtree = 
&rtree; + assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure"); - assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, 0, - &extent_a), "Unexpected rtree_write() failure, i=%u", i); - assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, 0, true), - &extent_a, - "rtree_read() should return previously set value, i=%u", i); + assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, 0, &extent_a), + "Unexpected rtree_write() failure"); + assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, 0, true), &extent_a, + "rtree_read() should return previously set value"); - assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, - ~((uintptr_t)0), &extent_b), - "Unexpected rtree_write() failure, i=%u", i); - assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, - ~((uintptr_t)0), true), &extent_b, - "rtree_read() should return previously set value, i=%u", i); + assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0), + &extent_b), "Unexpected rtree_write() failure"); + assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0), + true), &extent_b, + "rtree_read() should return previously set value"); - rtree_delete(tsdn, &rtree); - test_rtree = NULL; - } + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } TEST_END TEST_BEGIN(test_rtree_bits) { - tsdn_t *tsdn; - unsigned i, j, k; + tsdn_t *tsdn = tsdn_fetch(); - tsdn = tsdn_fetch(); + uintptr_t keys[] = {0, 1, (((uintptr_t)1) << LG_PAGE) - 1}; - for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[] = {0, 1, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; - extent_t extent; - rtree_t rtree; - rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; + extent_t extent; + rtree_t rtree; + rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; - test_rtree = &rtree; - assert_false(rtree_new(&rtree, i), - "Unexpected rtree_new() failure"); + test_rtree = &rtree; + assert_false(rtree_new(&rtree), + "Unexpected rtree_new() failure"); - for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - assert_false(rtree_write(tsdn, 
&rtree, &rtree_ctx, - keys[j], &extent), - "Unexpected rtree_write() failure"); - for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { - assert_ptr_eq(rtree_read(tsdn, &rtree, - &rtree_ctx, keys[k], true), &extent, - "rtree_read() should return previously set " - "value and ignore insignificant key bits; " - "i=%u, j=%u, k=%u, set key=%#"FMTxPTR", " - "get key=%#"FMTxPTR, i, j, k, keys[j], - keys[k]); - } - assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), - "Only leftmost rtree leaf should be set; " - "i=%u, j=%u", i, j); - rtree_clear(tsdn, &rtree, &rtree_ctx, keys[j]); + for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { + assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, keys[i], + &extent), "Unexpected rtree_write() failure"); + for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, + keys[j], true), &extent, + "rtree_read() should return previously set " + "value and ignore insignificant key bits; " + "i=%u, j=%u, set key=%#"FMTxPTR", get " + "key=%#"FMTxPTR, i, j, keys[i], keys[j]); } - - rtree_delete(tsdn, &rtree); - test_rtree = NULL; + assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, + (((uintptr_t)1) << LG_PAGE), false), + "Only leftmost rtree leaf should be set; i=%u", i); + rtree_clear(tsdn, &rtree, &rtree_ctx, keys[i]); } + + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } TEST_END TEST_BEGIN(test_rtree_random) { - unsigned i; - sfmt_t *sfmt; - tsdn_t *tsdn; #define NSET 16 #define SEED 42 + sfmt_t *sfmt = init_gen_rand(SEED); + tsdn_t *tsdn = tsdn_fetch(); + uintptr_t keys[NSET]; + extent_t extent; + rtree_t rtree; + rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; + rtree_elm_t *elm; - sfmt = init_gen_rand(SEED); - tsdn = tsdn_fetch(); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[NSET]; - extent_t extent; - unsigned j; - rtree_t rtree; - rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; - 
rtree_elm_t *elm; + test_rtree = &rtree; + assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure"); - test_rtree = &rtree; - assert_false(rtree_new(&rtree, i), - "Unexpected rtree_new() failure"); - - for (j = 0; j < NSET; j++) { - keys[j] = (uintptr_t)gen_rand64(sfmt); - elm = rtree_elm_acquire(tsdn, &rtree, &rtree_ctx, - keys[j], false, true); - assert_ptr_not_null(elm, - "Unexpected rtree_elm_acquire() failure"); - rtree_elm_write_acquired(tsdn, &rtree, elm, &extent); - rtree_elm_release(tsdn, &rtree, elm); - assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, - keys[j], true), &extent, - "rtree_read() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, - keys[j], true), &extent, - "rtree_read() should return previously set value, " - "j=%u", j); - } - - for (j = 0; j < NSET; j++) { - rtree_clear(tsdn, &rtree, &rtree_ctx, keys[j]); - assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, - keys[j], true), - "rtree_read() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, - keys[j], true), - "rtree_read() should return previously set value"); - } - - rtree_delete(tsdn, &rtree); - test_rtree = NULL; + for (unsigned i = 0; i < NSET; i++) { + keys[i] = (uintptr_t)gen_rand64(sfmt); + elm = rtree_elm_acquire(tsdn, &rtree, &rtree_ctx, keys[i], + false, true); + assert_ptr_not_null(elm, + "Unexpected rtree_elm_acquire() failure"); + rtree_elm_write_acquired(tsdn, &rtree, elm, &extent); + rtree_elm_release(tsdn, &rtree, elm); + assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i], + true), &extent, + "rtree_read() should return previously set value"); } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i], + true), &extent, + "rtree_read() should return previously set value, i=%u", i); + } + + for (unsigned i = 0; i < NSET; i++) { + rtree_clear(tsdn, &rtree, 
&rtree_ctx, keys[i]); + assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i], + true), "rtree_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i], + true), "rtree_read() should return previously set value"); + } + + rtree_delete(tsdn, &rtree); + test_rtree = NULL; fini_gen_rand(sfmt); #undef NSET #undef SEED