#ifndef JEMALLOC_INTERNAL_RTREE_INLINES_H
#define JEMALLOC_INTERNAL_RTREE_INLINES_H

#ifndef JEMALLOC_ENABLE_INLINE
uintptr_t rtree_leafkey(uintptr_t key);
uintptr_t rtree_subkey(uintptr_t key, unsigned level);
extent_t *rtree_elm_read(rtree_elm_t *elm, bool dependent);
void rtree_elm_write(rtree_elm_t *elm, const extent_t *extent);
rtree_elm_t *rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, const extent_t *extent);
extent_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent);
rtree_elm_t *rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
extent_t *rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree,
    rtree_elm_t *elm);
void rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree,
    rtree_elm_t *elm, const extent_t *extent);
void rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm);
void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	unsigned maskbits = ptrbits - cumbits;
	uintptr_t mask = ~((ZU(1) << maskbits) - 1);
	return (key & mask);
}
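
/*
 * Worked example (illustrative numbers, not the configured geometry): with
 * ptrbits == 64 and the non-leaf levels together consuming the high 46 key
 * bits, cumbits == 46, maskbits == 18, and rtree_leafkey() clears the low
 * 18 bits of the key.  All keys that map into the same leaf therefore share
 * a leafkey, which is what makes the leafkey usable as a cache tag in
 * rtree_elm_lookup().
 */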

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = rtree_levels[level].cumbits;
	unsigned shiftbits = ptrbits - cumbits;
	unsigned maskbits = rtree_levels[level].bits;
	uintptr_t mask = (ZU(1) << maskbits) - 1;
	return ((key >> shiftbits) & mask);
}
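
/*
 * Illustrative sketch: for a level with cumbits == 34 and bits == 17 on a
 * 64-bit system, shiftbits == 30 and the subkey is bits [30..46] of the
 * key; each level thus consumes its own slice of the key, from most
 * significant (level 0) downward.
 */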

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_elm_read(rtree_elm_t *elm, bool dependent) {
	extent_t *extent;

	if (dependent) {
		/*
		 * Reading a value on behalf of a pointer to a valid
		 * allocation is guaranteed to be a clean read even without
		 * synchronization, because the rtree update became visible
		 * in memory before the pointer came into existence.
		 */
		extent = (extent_t *)atomic_load_p(&elm->child_or_extent,
		    ATOMIC_RELAXED);
	} else {
		/*
		 * An arbitrary read, e.g. on behalf of ivsalloc(), may not
		 * be dependent on a previous rtree write, which means a
		 * stale read could result if synchronization were omitted
		 * here.
		 */
		extent = (extent_t *)atomic_load_p(&elm->child_or_extent,
		    ATOMIC_ACQUIRE);
	}

	/* Mask the lock bit. */
	extent = (extent_t *)((uintptr_t)extent & ~((uintptr_t)0x1));

	return extent;
}
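
/*
 * Note: elm->child_or_extent packs either a child pointer (interior nodes)
 * or an extent_t * (leaf elements) together with the low bit serving as a
 * lock flag, which is why the read path above strips bit 0 before
 * returning; rtree_elm_acquire() below implements the locking side of this
 * encoding.
 */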

JEMALLOC_INLINE void
rtree_elm_write(rtree_elm_t *elm, const extent_t *extent) {
	atomic_store_p(&elm->child_or_extent, (void *)extent, ATOMIC_RELEASE);
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	uintptr_t leafkey = rtree_leafkey(key);
#define RTREE_CACHE_CHECK(i) do { \
	if (likely(rtree_ctx->cache[i].leafkey == leafkey)) { \
		rtree_elm_t *leaf = rtree_ctx->cache[i].leaf; \
		if (likely(leaf != NULL)) { \
			/* Reorder. */ \
			memmove(&rtree_ctx->cache[1], \
			    &rtree_ctx->cache[0], \
			    sizeof(rtree_ctx_cache_elm_t) * i); \
			rtree_ctx->cache[0].leafkey = leafkey; \
			rtree_ctx->cache[0].leaf = leaf; \
			\
			uintptr_t subkey = rtree_subkey(key, \
			    RTREE_HEIGHT-1); \
			return &leaf[subkey]; \
		} \
	} \
} while (0)
	/* Check the MRU cache entry. */
	RTREE_CACHE_CHECK(0);
	/*
	 * Search the remaining cache elements, and on success move the
	 * matching element to the front.  Unroll the first iteration to
	 * avoid calling memmove() (the compiler typically optimizes it into
	 * raw moves).
	 */
	if (RTREE_CTX_NCACHE > 1) {
		RTREE_CACHE_CHECK(1);
	}
	for (unsigned i = 2; i < RTREE_CTX_NCACHE; i++) {
		RTREE_CACHE_CHECK(i);
	}
#undef RTREE_CACHE_CHECK

	return rtree_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent,
	    init_missing);
}
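
/*
 * Cache behavior sketch (assuming, say, RTREE_CTX_NCACHE == 8): a hit at
 * cache[3] memmove()s entries 0..2 up one slot and reinstalls the hit at
 * cache[0], so a subsequent lookup under the same leafkey takes the
 * unrolled cache[0] fast path; only a miss in every entry falls through to
 * rtree_elm_lookup_hard().
 */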

JEMALLOC_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    const extent_t *extent) {
	rtree_elm_t *elm;

	assert(extent != NULL); /* Use rtree_clear() for this case. */
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);

	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
	if (elm == NULL) {
		return true;
	}
	assert(rtree_elm_read(elm, false) == NULL);
	rtree_elm_write(elm, extent);

	return false;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
	rtree_elm_t *elm;

	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false);
	if (!dependent && elm == NULL) {
		return NULL;
	}

	return rtree_elm_read(elm, dependent);
}
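
/*
 * Example flow (a sketch of a hypothetical caller; addr and extent are
 * placeholders): registration performs
 *
 *	if (rtree_write(tsdn, rtree, rtree_ctx, (uintptr_t)addr, extent)) {
 *		// Treat as allocation failure (node initialization OOM).
 *	}
 *
 * and the matching query is rtree_read(tsdn, rtree, rtree_ctx,
 * (uintptr_t)addr, true) once addr is known to point into a live
 * allocation.
 */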

JEMALLOC_INLINE rtree_elm_t *
rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	rtree_elm_t *elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
	if (!dependent && elm == NULL) {
		return NULL;
	}

	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		/* The least significant bit serves as a lock. */
		void *extent_and_lock = atomic_load_p(&elm->child_or_extent,
		    ATOMIC_RELAXED);
		if (likely(((uintptr_t)extent_and_lock & (uintptr_t)0x1) ==
		    0)) {
			void *locked = (void *)((uintptr_t)extent_and_lock
			    | (uintptr_t)0x1);
			if (likely(atomic_compare_exchange_strong_p(
			    &elm->child_or_extent, &extent_and_lock, locked,
			    ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
				break;
			}
		}
		spin_adaptive(&spinner);
	}

	if (config_debug) {
		rtree_elm_witness_acquire(tsdn, rtree, key, elm);
	}

	return elm;
}
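
/*
 * Typical pairing (a sketch; rtree_clear() below is the in-tree example,
 * and new_extent is a placeholder):
 *
 *	rtree_elm_t *elm = rtree_elm_acquire(tsdn, rtree, rtree_ctx, key,
 *	    true, false);
 *	rtree_elm_write_acquired(tsdn, rtree, elm, new_extent);
 *	rtree_elm_release(tsdn, rtree, elm);
 *
 * Between acquire and release the element's low bit stays set, so competing
 * rtree_elm_acquire() callers spin, while rtree_elm_read() callers still
 * see a usable (masked) extent pointer.
 */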

JEMALLOC_INLINE extent_t *
rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
	extent_t *extent;
	void *ptr = atomic_load_p(&elm->child_or_extent, ATOMIC_RELAXED);
	assert(((uintptr_t)ptr & (uintptr_t)0x1) == (uintptr_t)0x1);
	extent = (extent_t *)((uintptr_t)ptr & ~((uintptr_t)0x1));
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);

	if (config_debug) {
		rtree_elm_witness_access(tsdn, rtree, elm);
	}

	return extent;
}

JEMALLOC_INLINE void
rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
    const extent_t *extent) {
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
	assert(((uintptr_t)atomic_load_p(&elm->child_or_extent, ATOMIC_RELAXED)
	    & (uintptr_t)0x1) == (uintptr_t)0x1);

	if (config_debug) {
		rtree_elm_witness_access(tsdn, rtree, elm);
	}
	atomic_store_p(&elm->child_or_extent, (void *)((uintptr_t)extent
	    | (uintptr_t)0x1), ATOMIC_RELEASE);
	assert(rtree_elm_read_acquired(tsdn, rtree, elm) == extent);
}

JEMALLOC_INLINE void
rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
	rtree_elm_write(elm, rtree_elm_read_acquired(tsdn, rtree, elm));
	if (config_debug) {
		rtree_elm_witness_release(tsdn, rtree, elm);
	}
}

JEMALLOC_INLINE void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_elm_t *elm;

	elm = rtree_elm_acquire(tsdn, rtree, rtree_ctx, key, true, false);
	rtree_elm_write_acquired(tsdn, rtree, elm, NULL);
	rtree_elm_release(tsdn, rtree, elm);
}
#endif

#endif /* JEMALLOC_INTERNAL_RTREE_INLINES_H */