Add element acquire/release capabilities to rtree.

This makes it possible to acquire short-term "ownership" of rtree
elements: a reader can load an extent pointer *and* examine the extent's
contents with a guarantee that the element will not be modified until
the ownership is released.  This is intended as a mechanism for
resolving rtree read/write races rather than as a way to lock extents.
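
The new API itself does not appear in the hunks below, so here is a
minimal sketch of the usage pattern the message describes. The names and
signatures (rtree_elm_acquire(), rtree_elm_read_acquired(),
rtree_elm_release()) and the helper do_work() are assumptions for
illustration, not lines quoted from this patch:

/*
 * Hypothetical usage of the element ownership API; names, signatures,
 * and the trailing bool arguments are assumed, not quoted from the patch.
 */
rtree_elm_t *elm = rtree_elm_acquire(&chunks_rtree, (uintptr_t)chunk,
    true, false);
if (elm != NULL) {
    /*
     * The element is owned: it is safe to read the extent pointer *and*
     * dereference it, because no writer can modify the element until
     * rtree_elm_release() is called.
     */
    extent_t *extent = rtree_elm_read_acquired(elm);
    if (extent != NULL)
        do_work(extent_size_get(extent)); /* do_work() is hypothetical. */
    rtree_elm_release(elm);
}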
commit 2d2b4e98c9 (parent f4a58847d3)
Author: Jason Evans
Date:   2016-03-28 03:06:35 -07:00

6 changed files with 302 additions and 135 deletions

src/chunk.c

@@ -146,7 +146,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
     assert(extent_addr_get(extent) == chunk);

-    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, extent))
+    if (rtree_write(&chunks_rtree, (uintptr_t)chunk, extent))
         return (true);
     if (config_prof && opt_prof) {
         size_t size = extent_size_get(extent);
@@ -170,10 +170,8 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 void
 chunk_deregister(const void *chunk, const extent_t *extent)
 {
-    bool err;
-
-    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
-    assert(!err);
+
+    rtree_clear(&chunks_rtree, (uintptr_t)chunk);
     if (config_prof && opt_prof) {
         size_t size = extent_size_get(extent);
         size_t nsub = (size == 0) ? 1 : size / chunksize;
@@ -684,12 +682,12 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
     return (false);
 }

-static rtree_node_elm_t *
+static rtree_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {

-    return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
-        sizeof(rtree_node_elm_t)));
+    return ((rtree_elm_t *)base_alloc(tsdn_fetch(), nelms *
+        sizeof(rtree_elm_t)));
 }

 bool
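
Most of the churn in both files is the mechanical rename
rtree_node_elm_t -> rtree_elm_t. A plausible reading (assumed; the type
definition is not in these hunks) is that interior and leaf elements now
share one type whose single word is a union over the possible pointers,
leaving a low-order bit available for the acquire/release machinery to
tag an element as owned:

/* Assumed element layout; the actual definition is not shown here. */
struct rtree_elm_s {
    union {
        void        *pun;    /* Untyped view, for atomic operations. */
        rtree_elm_t *child;  /* Interior levels: next-level node. */
        extent_t    *extent; /* Leaf level: the registered extent. */
    };
};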

src/rtree.c

@@ -8,7 +8,10 @@ hmin(unsigned ha, unsigned hb)
     return (ha < hb ? ha : hb);
 }

-/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+/*
+ * Only the most significant bits of keys passed to rtree_{read,write}() are
+ * used.
+ */
 bool
 rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
     rtree_node_dalloc_t *dalloc)
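
For context on the rewritten comment: each rtree level consumes the
next-most-significant slice of the key. A sketch of the subkey
extraction, reconstructed from the pre-existing rtree design rather than
taken from this diff:

/*
 * Level `level' consumes rtree->levels[level].bits key bits, located
 * just below the cumulative bits consumed by all prior levels.
 */
static inline uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
    return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
        rtree->levels[level].cumbits)) &
        ((ZU(1) << rtree->levels[level].bits) - 1));
}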
@@ -62,7 +65,7 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
 }

 static void
-rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
+rtree_delete_subtree(rtree_t *rtree, rtree_elm_t *node, unsigned level)
 {

     if (level + 1 < rtree->height) {
@@ -70,7 +73,7 @@ rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)

         nchildren = ZU(1) << rtree->levels[level].bits;
         for (i = 0; i < nchildren; i++) {
-            rtree_node_elm_t *child = node[i].child;
+            rtree_elm_t *child = node[i].child;
             if (child != NULL)
                 rtree_delete_subtree(rtree, child, level + 1);
         }
@@ -84,16 +87,16 @@ rtree_delete(rtree_t *rtree)
     unsigned i;

     for (i = 0; i < rtree->height; i++) {
-        rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+        rtree_elm_t *subtree = rtree->levels[i].subtree;
         if (subtree != NULL)
             rtree_delete_subtree(rtree, subtree, i);
     }
 }

-static rtree_node_elm_t *
-rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
+static rtree_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp)
 {
-    rtree_node_elm_t *node;
+    rtree_elm_t *node;

     if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
         /*
@@ -114,15 +117,15 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
     return (node);
 }

-rtree_node_elm_t *
+rtree_elm_t *
 rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
 {

     return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
 }

-rtree_node_elm_t *
-rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+rtree_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_elm_t *elm, unsigned level)
 {

     return (rtree_node_init(rtree, level, &elm->child));
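
The final hunk is truncated inside rtree_node_init(), whose
lazy-initialization protocol the visible CAS begins. In sketch form (the
spin-wait body and the atomic_read_p()/atomic_write_p() calls are
assumed from context, not quoted from the patch):

/*
 * Sketch of lazy node initialization: exactly one thread wins the CAS
 * from NULL to RTREE_NODE_INITIALIZING, allocates the node, and
 * publishes it; losing threads spin until the pointer appears.
 */
static rtree_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp)
{
    rtree_elm_t *node;

    if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
        /* Lost the race: wait for the winner to publish the node. */
        do {
            node = atomic_read_p((void **)elmp);
        } while (node == RTREE_NODE_INITIALIZING);
    } else {
        /* Won the race: allocate, then publish with a single store. */
        node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
        if (node == NULL)
            return (NULL);
        atomic_write_p((void **)elmp, node);
    }
    return (node);
}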