Refactor rtree to be lock-free.

Recent huge allocation refactoring associates huge allocations with
arenas, but it remains necessary to quickly look up huge allocation
metadata during reallocation/deallocation.  A global radix tree remains
a good solution to this problem, but locking would have become the
primary bottleneck after (upcoming) migration of chunk management from
global to per-arena data structures.

This lock-free implementation uses double-checked reads to traverse the
tree, so that in the steady state, each read or write requires only a
single atomic operation.
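
A minimal sketch of the pattern (illustrative only, using C11 atomics and
hypothetical names rather than jemalloc's atomic_read_p()/atomic_cas_p()
wrappers; the actual read paths appear in the rtree.h diff below):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s {
	node_t *_Atomic child;
};

/* Sentinel marking a slot that another thread is currently initializing. */
#define NODE_INITIALIZING ((node_t *)0x1)

static node_t *
child_tryread(node_t *elm)
{
	/* Steady state: a single relaxed load suffices. */
	node_t *child = atomic_load_explicit(&elm->child,
	    memory_order_relaxed);

	if (child == NULL || child == NODE_INITIALIZING) {
		/* Re-read with acquire ordering before giving up. */
		child = atomic_load_explicit(&elm->child,
		    memory_order_acquire);
	}
	return (child);
}

static node_t *
child_read(node_t *elm)
{
	node_t *child = child_tryread(elm);
	node_t *expected = NULL;

	if (child != NULL && child != NODE_INITIALIZING)
		return (child);
	/* Two-stage initialization: claim the slot with a CAS, then publish. */
	if (atomic_compare_exchange_strong(&elm->child, &expected,
	    NODE_INITIALIZING)) {
		child = calloc(1, sizeof(*child)); /* Error handling omitted. */
		atomic_store_explicit(&elm->child, child,
		    memory_order_release);
	} else {
		/* Another thread won the race; spin until it publishes. */
		do {
			child = atomic_load_explicit(&elm->child,
			    memory_order_acquire);
		} while (child == NODE_INITIALIZING);
	}
	return (child);
}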

This implementation also ensures that no more than two tree levels
actually exist, through a combination of careful virtual memory
allocation that makes large sparse nodes cheap, and skipping the root
node on x64 (possible because the top 16 bits are all 0 in practice).
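
For concreteness, with the constants introduced in the new rtree.h below
(and assuming 64-bit pointers): LG_RTREE_BITS_PER_LEVEL is 4, so each level
distinguishes 2^4 = 16 key bits and a node holds 2^16 pointer-sized entries
(512 KiB), which stays cheap while sparsely populated because its zero-filled
pages are only faulted in where keys actually land.  A 48-bit key space thus
needs 48/16 = 3 levels, and because x64 userland addresses keep the top 16
bits zero, rtree_start_level() always skips levels[0], leaving at most two
levels to allocate and traverse.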
Jason Evans 2015-01-30 22:54:08 -08:00
parent c810fcea1f
commit 8d0e04d42f
7 changed files with 384 additions and 229 deletions


@@ -35,7 +35,7 @@ extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;
extern rtree_t *chunks_rtree;
extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */


@@ -955,7 +955,7 @@ ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
if (rtree_get(&chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));


@@ -369,14 +369,21 @@ quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_get_locked
rtree_new
rtree_postfork_child
rtree_postfork_parent
rtree_prefork
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
s2u
s2u_compute
s2u_lookup


@@ -1,170 +1,270 @@
/*
* This radix tree implementation is tailored to the singular purpose of
* tracking which chunks are currently owned by jemalloc. This functionality
* is mandatory for OS X, where jemalloc must be able to respond to object
* ownership queries.
* associating metadata with chunks that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;
/*
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
* RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
* machine address width.
*/
#define RTREE_NODESIZE (1U << 16)
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
/*
* The node allocation callback function's argument is the number of contiguous
* rtree_node_elm_t structures to allocate, and the resulting memory must be
* zeroed.
*/
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_node_elm_s {
union {
rtree_node_elm_t *child;
void *val;
};
};
struct rtree_level_s {
/*
* A non-NULL subtree points to a subtree rooted along the hypothetical
* path to the leaf node corresponding to key 0. Depending on what keys
* have been used to store to the tree, an arbitrary combination of
* subtree pointers may remain NULL.
*
* Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
* This results in a 3-level tree, and the leftmost leaf can be directly
* accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
* 0x00000000) can be accessed via subtrees[1], and the remainder of the
* tree can be accessed via subtrees[0].
*
* levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
*
* levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
*
* levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
*
* This has practical implications on x64, which currently uses only the
* lower 47 bits of virtual address space in userland, thus leaving
* subtrees[0] unused and avoiding a level of tree traversal.
*/
rtree_node_elm_t *subtree;
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
};
struct rtree_s {
rtree_alloc_t *alloc;
rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
rtree_node_alloc_t *alloc;
rtree_node_dalloc_t *dalloc;
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
/*
* Precomputed table used to convert from the number of leading 0 key
* bits to which subtree level to start at.
*/
unsigned start_level[RTREE_HEIGHT_MAX];
rtree_level_t levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level);
void *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
void *rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
\
RTREE_LOCK(&rtree->mutex); \
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
return (0); \
} \
} \
\
/* \
* node is a leaf, so it contains values rather than node \
* pointers. \
*/ \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
{ \
uint8_t *leaf = (uint8_t *)node; \
ret = leaf[subkey]; \
} \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
return (ret); \
JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
if (unlikely(key == 0))
return (rtree->height - 1);
start_level = rtree->start_level[lg_floor(key) >>
LG_RTREE_BITS_PER_LEVEL];
assert(start_level < rtree->height);
return (start_level);
}
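/*
 * Example (hypothetical key, assuming the 48-bit/3-level configuration
 * described for rtree_level_s above, and RTREE_HEIGHT_MAX == 4 on a 64-bit
 * system): for key == 0x00007f1234560000, lg_floor(key) == 46 and
 * 46 >> LG_RTREE_BITS_PER_LEVEL == 2, so start_level ==
 * rtree->start_level[2] == 1 and the root level is skipped.
 */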
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). This scenario
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
rtree->levels[level].cumbits)) & ((ZU(1) <<
rtree->levels[level].bits) - 1));
}
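/*
 * Example (same hypothetical key and 48-bit configuration as above): at
 * level 1, bits == 16 and cumbits == 32, so
 * rtree_subkey(rtree, 0x00007f1234560000, 1) ==
 * (0x00007f1234560000 >> (64 - 32)) & 0xffff == 0x7f12; at the leaf level
 * (cumbits == 48) the subkey is 0x3456.
 */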
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm)
{
rtree_node_elm_t *child;
/* Double-checked read (first read may be stale). */
child = elm->child;
if (!rtree_node_valid(child))
child = atomic_read_p((void **)&elm->child);
return (child);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
rtree_node_elm_t *child;
child = rtree_child_tryread(elm);
if (unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
return (child);
}
JEMALLOC_INLINE void *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm)
{
return (atomic_read_p(&elm->val));
}
JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val)
{
atomic_write_p(&elm->val, val);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
if (!rtree_node_valid(subtree))
subtree = atomic_read_p((void **)&rtree->levels[level].subtree);
return (subtree);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level);
if (unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
return (subtree);
}
JEMALLOC_INLINE void *
rtree_get(rtree_t *rtree, uintptr_t key)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
void **node, **child;
unsigned i, start_level;
rtree_node_elm_t *node, *child;
malloc_mutex_lock(&rtree->mutex);
for (i = lshift = 0, height = rtree->height, node = rtree->root;
i < height - 1;
i++, lshift += bits, node = child) {
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
bits);
child = (void**)node[subkey];
if (child == NULL) {
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
start_level = rtree_start_level(rtree, key);
for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
/**/; i++, node = child) {
if (unlikely(!rtree_node_valid(node)))
return (NULL);
subkey = rtree_subkey(rtree, key, i);
if (i == rtree->height - 1) {
/*
* node is a leaf, so it contains values rather than
* child pointers.
*/
return (rtree_val_read(rtree, &node[subkey]));
}
assert(i < rtree->height - 1);
child = rtree_child_tryread(&node[subkey]);
}
not_reached();
}
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
uintptr_t subkey;
unsigned i, start_level;
rtree_node_elm_t *node, *child;
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_read(rtree, start_level);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
subkey = rtree_subkey(rtree, key, i);
if (i == rtree->height - 1) {
/*
* node is a leaf, so it contains values rather than
* child pointers.
*/
rtree_val_write(rtree, &node[subkey], val);
return (false);
}
assert(i < rtree->height - 1);
child = rtree_child_read(rtree, &node[subkey], i);
if (child == NULL)
return (true);
}
memset(child, 0, size);
node[subkey] = child;
}
}
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
{
uint8_t *leaf = (uint8_t *)node;
leaf[subkey] = val;
}
malloc_mutex_unlock(&rtree->mutex);
return (false);
not_reached();
}
#endif


@@ -21,7 +21,7 @@ static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;
rtree_t *chunks_rtree;
rtree_t chunks_rtree;
/* Various chunk-related settings. */
size_t chunksize;
@@ -200,7 +200,7 @@ chunk_register(void *chunk, size_t size, bool base)
assert(CHUNK_ADDR2BASE(chunk) == chunk);
if (config_ivsalloc && !base) {
if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, chunk))
return (true);
}
if (config_stats || config_prof) {
@@ -395,7 +395,7 @@ chunk_dalloc_core(void *chunk, size_t size)
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
@@ -415,6 +415,14 @@ chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
return (false);
}
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t)));
}
bool
chunk_boot(void)
{
@@ -436,9 +444,8 @@ chunk_boot(void)
extent_tree_szad_new(&chunks_szad_dss);
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL);
if (chunks_rtree == NULL)
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
}
@@ -450,8 +457,6 @@ chunk_prefork(void)
{
malloc_mutex_prefork(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
}
@@ -460,8 +465,6 @@ chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
if (config_ivsalloc)
rtree_postfork_parent(chunks_rtree);
malloc_mutex_postfork_parent(&chunks_mtx);
}
@@ -470,7 +473,5 @@ chunk_postfork_child(void)
{
chunk_dss_postfork_child();
if (config_ivsalloc)
rtree_postfork_child(chunks_rtree);
malloc_mutex_postfork_child(&chunks_mtx);
}


@@ -1,75 +1,74 @@
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
static unsigned
hmin(unsigned ha, unsigned hb)
{
rtree_t *ret;
unsigned bits_per_level, bits_in_leaf, height, i;
return (ha < hb ? ha : hb);
}
/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
bool
rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
rtree_node_dalloc_t *dalloc)
{
unsigned bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void
*)))) - 1;
bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE /
sizeof(uint8_t)))) - 1;
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
: (bits % RTREE_BITS_PER_LEVEL);
if (bits > bits_in_leaf) {
height = 1 + (bits - bits_in_leaf) / bits_per_level;
if ((height-1) * bits_per_level + bits_in_leaf != bits)
height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
height++;
} else {
} else
height = 1;
}
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
rtree->alloc = alloc;
rtree->dalloc = dalloc;
rtree->height = height;
ret->alloc = alloc;
ret->dalloc = dalloc;
if (malloc_mutex_init(&ret->mutex)) {
if (dalloc != NULL)
dalloc(ret);
return (NULL);
/* Root level. */
rtree->levels[0].subtree = NULL;
rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
bits_in_leaf;
rtree->levels[0].cumbits = rtree->levels[0].bits;
/* Interior levels. */
for (i = 1; i < height-1; i++) {
rtree->levels[i].subtree = NULL;
rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
RTREE_BITS_PER_LEVEL;
}
ret->height = height;
/* Leaf level. */
if (height > 1) {
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
ret->level2bits[0] = (bits - bits_in_leaf) %
bits_per_level;
} else
ret->level2bits[0] = bits_per_level;
for (i = 1; i < height-1; i++)
ret->level2bits[i] = bits_per_level;
ret->level2bits[height-1] = bits_in_leaf;
} else
ret->level2bits[0] = bits;
ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
if (dalloc != NULL)
dalloc(ret);
return (NULL);
rtree->levels[height-1].subtree = NULL;
rtree->levels[height-1].bits = bits_in_leaf;
rtree->levels[height-1].cumbits = bits;
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
return (ret);
/* Compute lookup table to be used by rtree_start_level(). */
for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
1);
}
return (false);
}
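/*
 * Example of the resulting configuration (illustrative parameters: 4 MiB
 * chunks, i.e. opt_lg_chunk == 22, on a 64-bit system): chunk_boot() passes
 * bits == 64 - 22 == 42, so bits_in_leaf == 42 % 16 == 10 and height ==
 * 1 + (42 - 10) / 16 == 3.  The levels end up with {bits, cumbits} ==
 * {16, 16}, {16, 32}, {10, 42}, and start_level[] == {2, 2, 1, 0}.
 */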
static void
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{
if (level < rtree->height - 1) {
size_t nchildren, i;
nchildren = ZU(1) << rtree->level2bits[level];
nchildren = ZU(1) << rtree->levels[level].bits;
for (i = 0; i < nchildren; i++) {
void **child = (void **)node[i];
rtree_node_elm_t *child = node[i].child;
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
@@ -80,28 +79,49 @@ rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
void
rtree_delete(rtree_t *rtree)
{
unsigned i;
rtree_delete_subtree(rtree, rtree->root, 0);
rtree->dalloc(rtree);
for (i = 0; i < rtree->height; i++) {
rtree_node_elm_t *subtree = rtree->levels[i].subtree;
if (subtree != NULL)
rtree_delete_subtree(rtree, subtree, i);
}
}
void
rtree_prefork(rtree_t *rtree)
static rtree_node_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
rtree_node_elm_t *node;
if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
/*
* Another thread is already in the process of initializing.
* Spin-wait until initialization is complete.
*/
do {
CPU_SPINWAIT;
node = atomic_read_p((void **)elmp);
} while (node == RTREE_NODE_INITIALIZING);
} else {
node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
if (node == NULL)
return (NULL);
atomic_write_p((void **)elmp, node);
}
return (node);
}
rtree_node_elm_t *
rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{
malloc_mutex_prefork(&rtree->mutex);
return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}
void
rtree_postfork_parent(rtree_t *rtree)
rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
malloc_mutex_postfork_parent(&rtree->mutex);
}
void
rtree_postfork_child(rtree_t *rtree)
{
malloc_mutex_postfork_child(&rtree->mutex);
return (rtree_node_init(rtree, level, &elm->child));
}


@@ -1,14 +1,30 @@
#include "test/jemalloc_test.h"
static rtree_node_elm_t *
node_alloc(size_t nelms)
{
return (calloc(nelms, sizeof(rtree_node_elm_t)));
}
static void
node_dalloc(rtree_node_elm_t *node)
{
free(node);
}
TEST_BEGIN(test_rtree_get_empty)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, malloc, free);
assert_u_eq(rtree_get(rtree, 0), 0,
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
assert_ptr_eq(rtree_get(&rtree, 0), NULL,
"rtree_get() should return NULL for empty tree");
rtree_delete(rtree);
rtree_delete(&rtree);
}
}
TEST_END
@@ -16,19 +32,22 @@ TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
extent_node_t node_a, node_b;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, malloc, free);
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
rtree_set(rtree, 0, 1);
assert_u_eq(rtree_get(rtree, 0), 1,
rtree_set(&rtree, 0, &node_a);
assert_ptr_eq(rtree_get(&rtree, 0), &node_a,
"rtree_get() should return previously set value");
rtree_set(rtree, ~((uintptr_t)0), 1);
assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
rtree_set(&rtree, ~((uintptr_t)0), &node_b);
assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0)), &node_b,
"rtree_get() should return previously set value");
rtree_delete(rtree);
rtree_delete(&rtree);
}
}
TEST_END
@@ -40,26 +59,30 @@ TEST_BEGIN(test_rtree_bits)
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
rtree_t *rtree = rtree_new(i, malloc, free);
extent_node_t node;
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
rtree_set(rtree, keys[j], 1);
rtree_set(&rtree, keys[j], &node);
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
assert_u_eq(rtree_get(rtree, keys[k]), 1,
assert_ptr_eq(rtree_get(&rtree, keys[k]), &node,
"rtree_get() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_u_eq(rtree_get(rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
assert_ptr_eq(rtree_get(&rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), NULL,
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
rtree_set(rtree, keys[j], 0);
rtree_set(&rtree, keys[j], NULL);
}
rtree_delete(rtree);
rtree_delete(&rtree);
}
}
TEST_END
@@ -68,37 +91,41 @@ TEST_BEGIN(test_rtree_random)
{
unsigned i;
sfmt_t *sfmt;
#define NSET 100
#define NSET 16
#define SEED 42
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, malloc, free);
uintptr_t keys[NSET];
extent_node_t node;
unsigned j;
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
rtree_set(rtree, keys[j], 1);
assert_u_eq(rtree_get(rtree, keys[j]), 1,
rtree_set(&rtree, keys[j], &node);
assert_ptr_eq(rtree_get(&rtree, keys[j]), &node,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 1,
assert_ptr_eq(rtree_get(&rtree, keys[j]), &node,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
rtree_set(rtree, keys[j], 0);
assert_u_eq(rtree_get(rtree, keys[j]), 0,
rtree_set(&rtree, keys[j], NULL);
assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 0,
assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
"rtree_get() should return previously set value");
}
rtree_delete(rtree);
rtree_delete(&rtree);
}
fini_gen_rand(sfmt);
#undef NSET