Fix rtree_leaf_elm_szind_slab_update().

Re-read the leaf element when atomic CAS fails due to a race with
another thread that has locked the leaf element, since
atomic_compare_exchange_strong_p() overwrites the expected value with
the actual value on failure.  This regression was introduced by
0ee0e0c155 (Implement compact rtree leaf
element representation.).

This resolves #798.
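
The failure mode generalizes beyond jemalloc's wrappers. As a minimal sketch of the corrected pattern, assuming plain C11 <stdatomic.h> in place of atomic_compare_exchange_strong_p() (the LOCK_BIT mask and word_payload_update() helper are hypothetical names, not jemalloc's), both the expected word and its replacement must be recomputed on every iteration, because a failed strong CAS writes the value it actually observed, lock bit included, into the expected-value argument:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCK_BIT ((uintptr_t)0x1) /* Low bit of the word doubles as a lock. */

/* Replace the payload bits of *word, never storing a set lock bit. */
static void
word_payload_update(_Atomic uintptr_t *word, uintptr_t new_payload) {
    while (true) {
        /* Fresh read each iteration, with the lock bit masked off. */
        uintptr_t expected = atomic_load_explicit(word,
            memory_order_relaxed) & ~LOCK_BIT;
        uintptr_t desired = new_payload & ~LOCK_BIT;
        if (atomic_compare_exchange_strong_explicit(word, &expected,
            desired, memory_order_acquire, memory_order_relaxed)) {
            break;
        }
        /*
         * CAS failed: another thread set the lock bit or changed the
         * word.  Loop and recompute rather than reusing `expected`,
         * which now holds the (possibly locked) observed value.
         */
    }
}

The fixed hunk below has the same shape: the reads that produce old_bits and bits move inside the while loop, so each CAS attempt starts from a freshly read, lock-masked word.
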
Author: Jason Evans
Date:   2017-05-02 21:45:46 -07:00
Parent: 344dd342dd
Commit: 0798fe6e70

@@ -251,17 +251,16 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
     * modified by another thread, the fact that the lock is embedded in the
     * same word requires that a CAS operation be used here.
     */
-   uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, false,
-       true) & ~((uintptr_t)0x1); /* Mask lock bit. */
-   uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
-       ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
-       (((uintptr_t)0x1 << LG_VADDR) - 1)) |
-       ((uintptr_t)slab << 1);
    spin_t spinner = SPIN_INITIALIZER;
    while (true) {
+       void *old_bits = (void *)(rtree_leaf_elm_bits_read(tsdn, rtree,
+           elm, false, true) & ~((uintptr_t)0x1)); /* Mask lock bit. */
+       void *bits = (void *)(((uintptr_t)szind << LG_VADDR) |
+           ((uintptr_t)rtree_leaf_elm_bits_extent_get(
+           (uintptr_t)old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+           ((uintptr_t)slab << 1));
        if (likely(atomic_compare_exchange_strong_p(&elm->le_bits,
-           (void **)&old_bits, (void *)bits, ATOMIC_ACQUIRE,
-           ATOMIC_RELAXED))) {
+           &old_bits, bits, ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
            break;
        }
        spin_adaptive(&spinner);
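
With the computation inside the loop, a CAS that fails because another thread holds the lock bit simply backs off via spin_adaptive() and retries from a fresh read; the stale expected value that atomic_compare_exchange_strong_p() wrote back is never reused for the next attempt.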