Protect the rtree/extent interactions with a mutex pool.
Instead of embedding a lock bit in rtree leaf elements, we associate extents with a small set of mutexes. This gets us two things:

- We can use the system mutexes. This (hypothetically) protects us from priority inversion, and lets us stop doing a backoff/sleep loop, instead opting for precise wakeups from the mutex.
- It cuts down on the number of mutex acquisitions we have to do (from four in the worst case to two).

We end up simplifying most of the rtree code (which no longer has to deal with locking or concurrency at all), at the cost of additional complexity in the extent code: since the mutex protecting an rtree leaf element is determined by reading the extent out of that element, the initial read is racy, so we may acquire an out-of-date mutex. We re-check the extent in the leaf after acquiring the mutex to protect against this race.
commit 3f685e8824 (parent 26c792e61a)
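
Before the diff, a minimal, self-contained sketch of the pattern the commit message describes: hash an object's address into a small pool of plain pthread mutexes, then re-check the racily read slot-to-object mapping once the pool mutex is held. All demo_* names and the toy hash mix are invented for illustration; this is not jemalloc code, only the same shape as the extent_lock()/extent_rtree_leaf_elm_try_lock() helpers added below.

/* Standalone illustration (C11 + pthreads); build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_POOL_SIZE 256    /* power of two, so the mod reduction is cheap */

typedef struct {
    pthread_mutex_t mutexes[DEMO_POOL_SIZE];
} demo_pool_t;

/* A leaf-like slot: an atomically published pointer to its owning object. */
typedef struct {
    _Atomic(void *) object;
} demo_slot_t;

static demo_pool_t demo_pool;

static void
demo_pool_init(demo_pool_t *pool) {
    for (int i = 0; i < DEMO_POOL_SIZE; i++) {
        pthread_mutex_init(&pool->mutexes[i], NULL);
    }
}

static pthread_mutex_t *
demo_pool_mutex(demo_pool_t *pool, uintptr_t key) {
    /* Toy integer mix standing in for the real hash() call. */
    key ^= key >> 17;
    key *= (uintptr_t)0xed5ad4bbu;
    return &pool->mutexes[key % DEMO_POOL_SIZE];
}

/*
 * The slot's mapping can change between reading the object pointer and
 * acquiring the mutex that pointer hashes to, so: read, lock, re-read,
 * and retry on mismatch (the shape of extent_rtree_leaf_elm_try_lock()).
 */
static void *
demo_lock_from_slot(demo_pool_t *pool, demo_slot_t *slot) {
    for (;;) {
        void *obj = atomic_load(&slot->object);
        if (obj == NULL) {
            return NULL;
        }
        pthread_mutex_t *mtx = demo_pool_mutex(pool, (uintptr_t)obj);
        pthread_mutex_lock(mtx);
        if (atomic_load(&slot->object) == obj) {
            return obj;    /* caller now holds obj's pool mutex */
        }
        pthread_mutex_unlock(mtx);    /* lost the race; try again */
    }
}

static void
demo_unlock(demo_pool_t *pool, void *obj) {
    pthread_mutex_unlock(demo_pool_mutex(pool, (uintptr_t)obj));
}

int
main(void) {
    static int payload = 42;
    demo_slot_t slot;

    demo_pool_init(&demo_pool);
    atomic_store(&slot.object, &payload);

    int *p = demo_lock_from_slot(&demo_pool, &slot);
    printf("locked object holding %d\n", *p);
    demo_unlock(&demo_pool, p);
    return 0;
}

The retry only repeats when another thread really did swap the mapping between the read and the lock, which is exactly the race the commit message calls out.
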
@ -103,6 +103,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/large.c \
$(srcroot)src/malloc_io.c \
$(srcroot)src/mutex.c \
$(srcroot)src/mutex_pool.c \
$(srcroot)src/nstime.c \
$(srcroot)src/pages.c \
$(srcroot)src/prng.c \
@ -6,6 +6,7 @@

extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;

extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
@ -1,10 +1,37 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H

#include "jemalloc/internal/mutex_pool_inlines.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"

static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline arena_t *
extent_arena_get(const extent_t *extent) {
    unsigned arena_ind = (unsigned)((extent->e_bits &
@ -56,6 +56,7 @@

#include "jemalloc/internal/witness_structs.h"
#include "jemalloc/internal/mutex_structs.h"
#include "jemalloc/internal/mutex_pool_structs.h"
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/extent_dss_structs.h"
@ -88,6 +89,7 @@

#include "jemalloc/internal/witness_inlines.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/mutex_pool_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/rtree_inlines.h"
#include "jemalloc/internal/base_inlines.h"
include/jemalloc/internal/mutex_pool_inlines.h (new file, 89 lines)
@ -0,0 +1,89 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_INLINES_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_INLINES_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/mutex_pool_structs.h"

/*
 * This file really combines "inlines" and "externs", but only transitionally.
 */

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time. You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */

static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_INLINES_H */
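
A brief usage sketch of the pool API above (the caller and its objects are hypothetical, not part of the diff): two keys must be taken in one mutex_pool_lock2() call rather than two mutex_pool_lock() calls, both because incremental acquisition is forbidden and because the keys may hash to the same mutex.

/* Hypothetical caller; obj_a/obj_b stand in for any two pool-protected objects. */
static void
toy_update_pair(tsdn_t *tsdn, mutex_pool_t *pool, void *obj_a, void *obj_b) {
    /* Acquires both mutexes in address order (or just one if the keys collide). */
    mutex_pool_lock2(tsdn, pool, (uintptr_t)obj_a, (uintptr_t)obj_b);
    /* ... mutate state belonging to obj_a and obj_b ... */
    mutex_pool_unlock2(tsdn, pool, (uintptr_t)obj_a, (uintptr_t)obj_b);
}
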
include/jemalloc/internal/mutex_pool_structs.h (new file, 14 lines)
@ -0,0 +1,14 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_STRUCTS_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_STRUCTS_H

/* This file really combines "structs" and "types", but only transitionally. */

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_STRUCTS_H */
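
Since MUTEX_POOL_SIZE is a power of two, the mod reduction in mutex_pool_mutex() is equivalent to a mask and costs no division; a tiny standalone check of that equivalence (illustrative only, not part of the commit):

#include <assert.h>
#include <stdint.h>

#define MUTEX_POOL_SIZE 256

int
main(void) {
    for (uint32_t h = 0; h < 100000; h++) {
        /* "% power-of-two" selects the same slot as "& (power-of-two - 1)". */
        assert(h % MUTEX_POOL_SIZE == (h & (MUTEX_POOL_SIZE - 1)));
    }
    return 0;
}
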
@ -3,6 +3,8 @@

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/witness_types.h"
#include "jemalloc/internal/witness_structs.h"

struct malloc_mutex_s {
    union {
@ -41,11 +41,5 @@ void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
void rtree_leaf_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree,
    uintptr_t key, const rtree_leaf_elm_t *elm);
void rtree_leaf_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
    const rtree_leaf_elm_t *elm);
void rtree_leaf_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
    const rtree_leaf_elm_t *elm);

#endif /* JEMALLOC_INTERNAL_RTREE_EXTERNS_H */
@ -47,21 +47,16 @@ rtree_subkey(uintptr_t key, unsigned level) {
|
||||
# ifdef RTREE_LEAF_COMPACT
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, bool dependent) {
|
||||
if (config_debug && acquired) {
|
||||
assert(dependent);
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
|
||||
bool dependent) {
|
||||
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
|
||||
/* Restore sign-extended high bits, mask slab and lock bits. */
|
||||
/* Restore sign-extended high bits, mask slab bit. */
|
||||
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
|
||||
RTREE_NHIB) & ~((uintptr_t)0x3));
|
||||
RTREE_NHIB) & ~((uintptr_t)0x1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
@ -71,51 +66,29 @@ rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
|
||||
return (bool)((bits >> 1) & (uintptr_t)0x1);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_bits_locked_get(uintptr_t bits) {
|
||||
return (bool)(bits & (uintptr_t)0x1);
|
||||
}
|
||||
|
||||
# endif
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, bool dependent) {
|
||||
if (config_debug && acquired) {
|
||||
assert(dependent);
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, acquired,
|
||||
dependent);
|
||||
assert(!acquired || rtree_leaf_elm_bits_locked_get(bits));
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_extent_get(bits);
|
||||
#else
|
||||
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
assert(!acquired || ((uintptr_t)extent & (uintptr_t)0x1) ==
|
||||
(uintptr_t)0x1);
|
||||
/* Mask lock bit. */
|
||||
extent = (extent_t *)((uintptr_t)extent & ~((uintptr_t)0x1));
|
||||
return extent;
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, bool dependent) {
|
||||
if (config_debug && acquired) {
|
||||
assert(dependent);
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, acquired,
|
||||
dependent);
|
||||
assert(!acquired || rtree_leaf_elm_bits_locked_get(bits));
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_szind_get(bits);
|
||||
#else
|
||||
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
|
||||
@ -125,16 +98,9 @@ rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, bool dependent) {
|
||||
if (config_debug && acquired) {
|
||||
assert(dependent);
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
|
||||
bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, acquired,
|
||||
dependent);
|
||||
assert(!acquired || rtree_leaf_elm_bits_locked_get(bits));
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_slab_get(bits);
|
||||
#else
|
||||
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
|
||||
@ -143,46 +109,31 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_extent_lock_write(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, bool acquired, extent_t *extent, bool lock) {
|
||||
if (config_debug && acquired) {
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
|
||||
|
||||
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
extent_t *extent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
acquired, acquired);
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
|
||||
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits) << 1) |
|
||||
(uintptr_t)lock;
|
||||
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
if (lock) {
|
||||
/* Overlay lock bit. */
|
||||
extent = (extent_t *)((uintptr_t)extent | (uintptr_t)0x1);
|
||||
}
|
||||
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, szind_t szind) {
|
||||
if (config_debug && acquired) {
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
szind_t szind) {
|
||||
assert(szind <= NSIZES);
|
||||
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
acquired, acquired);
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits) << 1) |
|
||||
(uintptr_t)acquired;
|
||||
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
|
||||
@ -191,18 +142,13 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, bool slab) {
|
||||
if (config_debug && acquired) {
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
|
||||
bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
acquired, acquired);
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab << 1) |
|
||||
(uintptr_t)acquired;
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
|
||||
@ -211,27 +157,20 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||
bool acquired, extent_t *extent, szind_t szind, bool slab) {
|
||||
if (config_debug && acquired) {
|
||||
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
|
||||
}
|
||||
assert(!slab || szind < NBINS);
|
||||
|
||||
extent_t *extent, szind_t szind, bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)slab << 1) |
|
||||
(uintptr_t)acquired;
|
||||
((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, acquired, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, acquired, szind);
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
/*
|
||||
* Write extent last, since the element is atomically considered valid
|
||||
* as soon as the extent field is non-NULL.
|
||||
*/
|
||||
rtree_leaf_elm_extent_lock_write(tsdn, rtree, elm, acquired, extent,
|
||||
acquired);
|
||||
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -244,32 +183,8 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
|
||||
* The caller implicitly assures that it is the only writer to the szind
|
||||
* and slab fields, and that the extent field cannot currently change.
|
||||
*/
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
/*
|
||||
* Another thread may concurrently acquire the elm, which means that
|
||||
* even though the szind and slab fields will not be concurrently
|
||||
* modified by another thread, the fact that the lock is embedded in the
|
||||
* same word requires that a CAS operation be used here.
|
||||
*/
|
||||
spin_t spinner = SPIN_INITIALIZER;
|
||||
while (true) {
|
||||
void *old_bits = (void *)(rtree_leaf_elm_bits_read(tsdn, rtree,
|
||||
elm, false, true) & ~((uintptr_t)0x1)); /* Mask lock bit. */
|
||||
void *bits = (void *)(((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_extent_get(
|
||||
(uintptr_t)old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)slab << 1));
|
||||
if (likely(atomic_compare_exchange_strong_p(&elm->le_bits,
|
||||
&old_bits, bits, ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
|
||||
break;
|
||||
}
|
||||
spin_adaptive(&spinner);
|
||||
}
|
||||
#else
|
||||
/* No need to lock. */
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, false, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, false, szind);
|
||||
#endif
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
|
||||
@ -343,9 +258,8 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
|
||||
return true;
|
||||
}
|
||||
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false, false) ==
|
||||
NULL);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, false, extent, szind, slab);
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -370,7 +284,7 @@ rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
if (!dependent && elm == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, false, dependent);
|
||||
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
@ -381,7 +295,7 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
if (!dependent && elm == NULL) {
|
||||
return NSIZES;
|
||||
}
|
||||
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, false, dependent);
|
||||
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -397,10 +311,8 @@ rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
if (!dependent && elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, false,
|
||||
dependent);
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, false,
|
||||
dependent);
|
||||
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -412,63 +324,11 @@ rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
if (!dependent && elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, false,
|
||||
dependent);
|
||||
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, false, dependent);
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline rtree_leaf_elm_t *
|
||||
rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, bool init_missing) {
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
|
||||
key, dependent, init_missing);
|
||||
if (!dependent && elm == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(elm != NULL);
|
||||
|
||||
spin_t spinner = SPIN_INITIALIZER;
|
||||
while (true) {
|
||||
/* The least significant bit serves as a lock. */
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
# define RTREE_FIELD_WITH_LOCK le_bits
|
||||
#else
|
||||
# define RTREE_FIELD_WITH_LOCK le_extent
|
||||
#endif
|
||||
void *bits = atomic_load_p(&elm->RTREE_FIELD_WITH_LOCK,
|
||||
ATOMIC_RELAXED);
|
||||
if (likely(((uintptr_t)bits & (uintptr_t)0x1) == 0)) {
|
||||
void *locked = (void *)((uintptr_t)bits |
|
||||
(uintptr_t)0x1);
|
||||
if (likely(atomic_compare_exchange_strong_p(
|
||||
&elm->RTREE_FIELD_WITH_LOCK, &bits, locked,
|
||||
ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_adaptive(&spinner);
|
||||
#undef RTREE_FIELD_WITH_LOCK
|
||||
}
|
||||
|
||||
if (config_debug) {
|
||||
rtree_leaf_elm_witness_acquire(tsdn, rtree, key, elm);
|
||||
}
|
||||
|
||||
return elm;
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm) {
|
||||
extent_t *extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, true,
|
||||
true);
|
||||
rtree_leaf_elm_extent_lock_write(tsdn, rtree, elm, true, extent, false);
|
||||
|
||||
if (config_debug) {
|
||||
rtree_leaf_elm_witness_release(tsdn, rtree, elm);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, szind_t szind, bool slab) {
|
||||
@ -482,9 +342,9 @@ static inline void
|
||||
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false, false) !=
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
|
||||
NULL);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, false, NULL, NSIZES, false);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_RTREE_INLINES_H */
|
||||
|
@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_RTREE_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_pool_structs.h"

struct rtree_node_elm_s {
    atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
@ -18,13 +19,12 @@ struct rtree_leaf_elm_s {
     * x: index
     * e: extent
     * b: slab
     * k: lock
     *
     * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee00bk
     * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
     */
    atomic_p_t le_bits;
#else
    atomic_p_t le_extent; /* (extent_t *), lock in low bit */
    atomic_p_t le_extent; /* (extent_t *) */
    atomic_u_t le_szind; /* (szind_t) */
    atomic_b_t le_slab; /* (bool) */
#endif
@ -66,27 +66,4 @@ typedef struct rtree_s rtree_t;
|
||||
*/
|
||||
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}
|
||||
|
||||
/*
|
||||
* Maximum number of concurrently acquired elements per thread. This controls
|
||||
* how many witness_t structures are embedded in tsd. Ideally rtree_leaf_elm_t
|
||||
* would have a witness_t directly embedded, but that would dramatically bloat
|
||||
* the tree. This must contain enough entries to e.g. coalesce two extents.
|
||||
*/
|
||||
#define RTREE_ELM_ACQUIRE_MAX 4
|
||||
|
||||
/* Initializers for rtree_leaf_elm_witness_tsd_t. */
|
||||
#define RTREE_ELM_WITNESS_INITIALIZER { \
|
||||
NULL, \
|
||||
WITNESS_INITIALIZER("rtree_leaf_elm", WITNESS_RANK_RTREE_ELM) \
|
||||
}
|
||||
|
||||
#define RTREE_ELM_WITNESS_TSD_INITIALIZER { \
|
||||
{ \
|
||||
RTREE_ELM_WITNESS_INITIALIZER, \
|
||||
RTREE_ELM_WITNESS_INITIALIZER, \
|
||||
RTREE_ELM_WITNESS_INITIALIZER, \
|
||||
RTREE_ELM_WITNESS_INITIALIZER \
|
||||
} \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_RTREE_TYPES_H */
|
||||
|
@ -1,19 +0,0 @@
|
||||
#ifndef JEMALLOC_INTERNAL_RTREE_WITNESS_H
|
||||
#define JEMALLOC_INTERNAL_RTREE_WITNESS_H
|
||||
|
||||
#include "jemalloc/internal/rtree_types.h"
|
||||
#include "jemalloc/internal/witness_types.h"
|
||||
#include "jemalloc/internal/witness_structs.h"
|
||||
|
||||
typedef struct rtree_leaf_elm_witness_s rtree_leaf_elm_witness_t;
|
||||
struct rtree_leaf_elm_witness_s {
|
||||
const rtree_leaf_elm_t *elm;
|
||||
witness_t witness;
|
||||
};
|
||||
|
||||
typedef struct rtree_leaf_elm_witness_tsd_s rtree_leaf_elm_witness_tsd_t;
|
||||
struct rtree_leaf_elm_witness_tsd_s {
|
||||
rtree_leaf_elm_witness_t witnesses[RTREE_ELM_ACQUIRE_MAX];
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_RTREE_WITNESS_H */
|
@ -7,7 +7,6 @@
|
||||
#include "jemalloc/internal/prof_types.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/rtree_ctx.h"
|
||||
#include "jemalloc/internal/rtree_witness.h"
|
||||
#include "jemalloc/internal/tcache_types.h"
|
||||
#include "jemalloc/internal/tcache_structs.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
@ -76,7 +75,6 @@ typedef void (*test_callback_t)(int *);
|
||||
O(arenas_tdata, arena_tdata_t *) \
|
||||
O(tcache, tcache_t) \
|
||||
O(witnesses, witness_list_t) \
|
||||
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t) \
|
||||
O(witness_fork, bool) \
|
||||
MALLOC_TEST_TSD
|
||||
|
||||
@ -95,7 +93,6 @@ typedef void (*test_callback_t)(int *);
|
||||
NULL, \
|
||||
TCACHE_ZERO_INITIALIZER, \
|
||||
ql_head_initializer(witnesses), \
|
||||
RTREE_ELM_WITNESS_TSD_INITIALIZER, \
|
||||
false \
|
||||
MALLOC_TEST_TSD_INITIALIZER \
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
#define WITNESS_RANK_EXTENTS 11U
#define WITNESS_RANK_EXTENT_FREELIST 12U

#define WITNESS_RANK_RTREE_ELM 13U
#define WITNESS_RANK_EXTENT_POOL 13U
#define WITNESS_RANK_RTREE 14U
#define WITNESS_RANK_BASE 15U
#define WITNESS_RANK_ARENA_LARGE 16U
src/extent.c (246 changed lines)
@ -5,11 +5,12 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ph.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);
@ -95,6 +96,57 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
    extent_esnead_comp)

typedef enum {
    lock_result_success,
    lock_result_failure,
    lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
    extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
        elm, true);

    if (extent1 == NULL) {
        return lock_result_no_extent;
    }
    /*
     * It's possible that the extent changed out from under us, and with it
     * the leaf->extent mapping. We have to recheck while holding the lock.
     */
    extent_lock(tsdn, extent1);
    extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
        &extents_rtree, elm, true);

    if (extent1 == extent2) {
        *result = extent1;
        return lock_result_success;
    } else {
        extent_unlock(tsdn, extent1);
        return lock_result_failure;
    }
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
    extent_t *ret = NULL;
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
        rtree_ctx, (uintptr_t)addr, false, false);
    if (elm == NULL) {
        return NULL;
    }
    lock_result_t lock_result;
    do {
        lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
    } while (lock_result == lock_result_failure);
    return ret;
}

extent_t *
|
||||
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
|
||||
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||
@ -508,28 +560,22 @@ extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
|
||||
}
|
||||
|
||||
static bool
|
||||
extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
|
||||
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
|
||||
const extent_t *extent, bool dependent, bool init_missing,
|
||||
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
|
||||
*r_elm_a = rtree_leaf_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
|
||||
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)extent_base_get(extent), dependent, init_missing);
|
||||
if (!dependent && *r_elm_a == NULL) {
|
||||
return true;
|
||||
}
|
||||
assert(*r_elm_a != NULL);
|
||||
|
||||
if (extent_size_get(extent) > PAGE) {
|
||||
*r_elm_b = rtree_leaf_elm_acquire(tsdn, &extents_rtree,
|
||||
rtree_ctx, (uintptr_t)extent_last_get(extent), dependent,
|
||||
init_missing);
|
||||
if (!dependent && *r_elm_b == NULL) {
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, *r_elm_a);
|
||||
return true;
|
||||
}
|
||||
assert(*r_elm_b != NULL);
|
||||
} else {
|
||||
*r_elm_b = NULL;
|
||||
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)extent_last_get(extent), dependent, init_missing);
|
||||
if (!dependent && *r_elm_b == NULL) {
|
||||
return true;
|
||||
}
|
||||
assert(*r_elm_b != NULL);
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -537,20 +583,10 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
|
||||
static void
|
||||
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
|
||||
rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, true, extent, szind,
|
||||
slab);
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
|
||||
if (elm_b != NULL) {
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, true, extent,
|
||||
szind, slab);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
extent_rtree_release(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
|
||||
rtree_leaf_elm_t *elm_b) {
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, elm_a);
|
||||
if (elm_b != NULL) {
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, elm_b);
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
|
||||
slab);
|
||||
}
|
||||
}
|
||||
|
||||
@ -609,17 +645,25 @@ extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
rtree_leaf_elm_t *elm_a, *elm_b;
|
||||
|
||||
if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
|
||||
&elm_b)) {
|
||||
/*
|
||||
* We need to hold the lock to protect against a concurrent coalesce
|
||||
* operation that sees us in a partial state.
|
||||
*/
|
||||
extent_lock(tsdn, extent);
|
||||
|
||||
if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
|
||||
&elm_a, &elm_b)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
szind_t szind = extent_szind_get_maybe_invalid(extent);
|
||||
bool slab = extent_slab_get(extent);
|
||||
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
|
||||
if (slab) {
|
||||
extent_interior_register(tsdn, rtree_ctx, extent, szind);
|
||||
}
|
||||
extent_rtree_release(tsdn, elm_a, elm_b);
|
||||
|
||||
extent_unlock(tsdn, extent);
|
||||
|
||||
if (config_prof && gdump_add) {
|
||||
extent_gdump_add(tsdn, extent);
|
||||
@ -663,15 +707,18 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) {
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
rtree_leaf_elm_t *elm_a, *elm_b;
|
||||
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
|
||||
&elm_a, &elm_b);
|
||||
|
||||
extent_lock(tsdn, extent);
|
||||
|
||||
extent_rtree_acquire(tsdn, rtree_ctx, extent, true, false, &elm_a,
|
||||
&elm_b);
|
||||
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
|
||||
if (extent_slab_get(extent)) {
|
||||
extent_interior_deregister(tsdn, rtree_ctx, extent);
|
||||
extent_slab_set(extent, false);
|
||||
}
|
||||
extent_rtree_release(tsdn, elm_a, elm_b);
|
||||
|
||||
extent_unlock(tsdn, extent);
|
||||
|
||||
if (config_prof) {
|
||||
extent_gdump_sub(tsdn, extent);
|
||||
@ -717,24 +764,21 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_assure_initialized(arena, r_extent_hooks);
|
||||
extent_t *extent;
|
||||
if (new_addr != NULL) {
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn,
|
||||
&extents_rtree, rtree_ctx, (uintptr_t)new_addr, false,
|
||||
false);
|
||||
if (elm != NULL) {
|
||||
extent = rtree_leaf_elm_extent_read(tsdn,
|
||||
&extents_rtree, elm, true, true);
|
||||
if (extent != NULL) {
|
||||
assert(extent_base_get(extent) == new_addr);
|
||||
if (extent_arena_get(extent) != arena ||
|
||||
extent_size_get(extent) < esize ||
|
||||
extent_state_get(extent) !=
|
||||
extents_state_get(extents)) {
|
||||
extent = NULL;
|
||||
}
|
||||
extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
|
||||
if (extent != NULL) {
|
||||
/*
|
||||
* We might null-out extent to report an error, but we
|
||||
* still need to unlock the associated mutex after.
|
||||
*/
|
||||
extent_t *unlock_extent = extent;
|
||||
assert(extent_base_get(extent) == new_addr);
|
||||
if (extent_arena_get(extent) != arena ||
|
||||
extent_size_get(extent) < esize ||
|
||||
extent_state_get(extent) !=
|
||||
extents_state_get(extents)) {
|
||||
extent = NULL;
|
||||
}
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, elm);
|
||||
} else {
|
||||
extent = NULL;
|
||||
extent_unlock(tsdn, unlock_extent);
|
||||
}
|
||||
} else {
|
||||
extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
|
||||
@ -1254,20 +1298,19 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
|
||||
again = false;
|
||||
|
||||
/* Try to coalesce forward. */
|
||||
rtree_leaf_elm_t *next_elm = rtree_leaf_elm_acquire(tsdn,
|
||||
&extents_rtree, rtree_ctx,
|
||||
(uintptr_t)extent_past_get(extent), false, false);
|
||||
if (next_elm != NULL) {
|
||||
extent_t *next = rtree_leaf_elm_extent_read(tsdn,
|
||||
&extents_rtree, next_elm, true, true);
|
||||
extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
|
||||
extent_past_get(extent));
|
||||
if (next != NULL) {
|
||||
/*
|
||||
* extents->mtx only protects against races for
|
||||
* like-state extents, so call extent_can_coalesce()
|
||||
* before releasing the next_elm lock.
|
||||
* before releasing next's pool lock.
|
||||
*/
|
||||
bool can_coalesce = (next != NULL &&
|
||||
extent_can_coalesce(arena, extents, extent, next));
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, next_elm);
|
||||
bool can_coalesce = extent_can_coalesce(arena, extents,
|
||||
extent, next);
|
||||
|
||||
extent_unlock(tsdn, next);
|
||||
|
||||
if (can_coalesce && !extent_coalesce(tsdn, arena,
|
||||
r_extent_hooks, extents, extent, next, true)) {
|
||||
if (extents->delay_coalesce) {
|
||||
@ -1280,15 +1323,13 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
|
||||
}
|
||||
|
||||
/* Try to coalesce backward. */
|
||||
rtree_leaf_elm_t *prev_elm = rtree_leaf_elm_acquire(tsdn,
|
||||
&extents_rtree, rtree_ctx,
|
||||
(uintptr_t)extent_before_get(extent), false, false);
|
||||
if (prev_elm != NULL) {
|
||||
extent_t *prev = rtree_leaf_elm_extent_read(tsdn,
|
||||
&extents_rtree, prev_elm, true, true);
|
||||
bool can_coalesce = (prev != NULL &&
|
||||
extent_can_coalesce(arena, extents, extent, prev));
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, prev_elm);
|
||||
extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
|
||||
extent_before_get(extent));
|
||||
if (prev != NULL) {
|
||||
bool can_coalesce = extent_can_coalesce(arena, extents,
|
||||
extent, prev);
|
||||
extent_unlock(tsdn, prev);
|
||||
|
||||
if (can_coalesce && !extent_coalesce(tsdn, arena,
|
||||
r_extent_hooks, extents, extent, prev, false)) {
|
||||
extent = prev;
|
||||
@ -1610,22 +1651,25 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
assert(extent_size_get(extent) == size_a + size_b);
|
||||
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||
|
||||
extent_t *trail;
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
rtree_leaf_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
|
||||
|
||||
extent_hooks_assure_initialized(arena, r_extent_hooks);
|
||||
|
||||
if ((*r_extent_hooks)->split == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
trail = extent_alloc(tsdn, arena);
|
||||
extent_t *trail = extent_alloc(tsdn, arena);
|
||||
if (trail == NULL) {
|
||||
goto label_error_a;
|
||||
}
|
||||
|
||||
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
|
||||
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
|
||||
extent_state_get(extent), extent_zeroed_get(extent),
|
||||
extent_committed_get(extent));
|
||||
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
|
||||
{
|
||||
extent_t lead;
|
||||
|
||||
@ -1634,25 +1678,24 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_state_get(extent), extent_zeroed_get(extent),
|
||||
extent_committed_get(extent));
|
||||
|
||||
if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
|
||||
&lead_elm_a, &lead_elm_b)) {
|
||||
goto label_error_b;
|
||||
}
|
||||
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
|
||||
true, &lead_elm_a, &lead_elm_b);
|
||||
}
|
||||
rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
|
||||
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
|
||||
&trail_elm_a, &trail_elm_b);
|
||||
|
||||
if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
|
||||
|| trail_elm_b == NULL) {
|
||||
goto label_error_b;
|
||||
}
|
||||
|
||||
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
|
||||
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
|
||||
extent_state_get(extent), extent_zeroed_get(extent),
|
||||
extent_committed_get(extent));
|
||||
if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
|
||||
&trail_elm_a, &trail_elm_b)) {
|
||||
goto label_error_c;
|
||||
}
|
||||
extent_lock2(tsdn, extent, trail);
|
||||
|
||||
if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
|
||||
size_a + size_b, size_a, size_b, extent_committed_get(extent),
|
||||
arena_ind_get(arena))) {
|
||||
goto label_error_d;
|
||||
goto label_error_c;
|
||||
}
|
||||
|
||||
extent_size_set(extent, size_a);
|
||||
@ -1663,14 +1706,11 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
|
||||
szind_b, slab_b);
|
||||
|
||||
extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
|
||||
extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
|
||||
extent_unlock2(tsdn, extent, trail);
|
||||
|
||||
return trail;
|
||||
label_error_d:
|
||||
extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
|
||||
label_error_c:
|
||||
extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
|
||||
extent_unlock2(tsdn, extent, trail);
|
||||
label_error_b:
|
||||
extent_dalloc(tsdn, arena, trail);
|
||||
label_error_a:
|
||||
@ -1734,20 +1774,20 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
|
||||
extent_rtree_acquire(tsdn, rtree_ctx, a, true, false, &a_elm_a,
|
||||
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
|
||||
&a_elm_b);
|
||||
extent_rtree_acquire(tsdn, rtree_ctx, b, true, false, &b_elm_a,
|
||||
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
|
||||
&b_elm_b);
|
||||
|
||||
extent_lock2(tsdn, a, b);
|
||||
|
||||
if (a_elm_b != NULL) {
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, true, NULL,
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
|
||||
NSIZES, false);
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, a_elm_b);
|
||||
}
|
||||
if (b_elm_b != NULL) {
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, true, NULL,
|
||||
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
|
||||
NSIZES, false);
|
||||
rtree_leaf_elm_release(tsdn, &extents_rtree, b_elm_a);
|
||||
} else {
|
||||
b_elm_b = b_elm_a;
|
||||
}
|
||||
@ -1759,7 +1799,8 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
|
||||
|
||||
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
|
||||
extent_rtree_release(tsdn, a_elm_a, b_elm_b);
|
||||
|
||||
extent_unlock2(tsdn, a, b);
|
||||
|
||||
extent_dalloc(tsdn, extent_arena_get(b), b);
|
||||
|
||||
@ -1772,6 +1813,11 @@ extent_boot(void) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
|
||||
WITNESS_RANK_EXTENT_POOL)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (have_dss) {
|
||||
extent_dss_boot();
|
||||
}
|
||||
|
src/mutex_pool.c (new file, 15 lines)
@ -0,0 +1,15 @@
#define JEMALLOC_MUTEX_POOL_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

bool
mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
    for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
        if (malloc_mutex_init(&pool->mutexes[i], name, rank,
            malloc_mutex_address_ordered)) {
            return true;
        }
    }
    return false;
}
src/rtree.c (115 changed lines)
@ -304,121 +304,6 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
not_reached();
|
||||
}
|
||||
|
||||
static int
|
||||
rtree_leaf_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
|
||||
void *ob) {
|
||||
uintptr_t ka = (uintptr_t)oa;
|
||||
uintptr_t kb = (uintptr_t)ob;
|
||||
|
||||
assert(ka != 0);
|
||||
assert(kb != 0);
|
||||
|
||||
return (ka > kb) - (ka < kb);
|
||||
}
|
||||
|
||||
static witness_t *
|
||||
rtree_leaf_elm_witness_alloc(tsd_t *tsd, uintptr_t key,
|
||||
const rtree_leaf_elm_t *elm) {
|
||||
witness_t *witness;
|
||||
size_t i;
|
||||
rtree_leaf_elm_witness_tsd_t *witnesses =
|
||||
tsd_rtree_leaf_elm_witnessesp_get(tsd);
|
||||
|
||||
/* Iterate over entire array to detect double allocation attempts. */
|
||||
witness = NULL;
|
||||
for (i = 0; i < RTREE_ELM_ACQUIRE_MAX; i++) {
|
||||
rtree_leaf_elm_witness_t *rew = &witnesses->witnesses[i];
|
||||
|
||||
assert(rew->elm != elm);
|
||||
if (rew->elm == NULL && witness == NULL) {
|
||||
rew->elm = elm;
|
||||
witness = &rew->witness;
|
||||
witness_init(witness, "rtree_leaf_elm",
|
||||
WITNESS_RANK_RTREE_ELM, rtree_leaf_elm_witness_comp,
|
||||
(void *)key);
|
||||
}
|
||||
}
|
||||
assert(witness != NULL);
|
||||
return witness;
|
||||
}
|
||||
|
||||
static witness_t *
|
||||
rtree_leaf_elm_witness_find(tsd_t *tsd, const rtree_leaf_elm_t *elm) {
|
||||
size_t i;
|
||||
rtree_leaf_elm_witness_tsd_t *witnesses =
|
||||
tsd_rtree_leaf_elm_witnessesp_get(tsd);
|
||||
|
||||
for (i = 0; i < RTREE_ELM_ACQUIRE_MAX; i++) {
|
||||
rtree_leaf_elm_witness_t *rew = &witnesses->witnesses[i];
|
||||
|
||||
if (rew->elm == elm) {
|
||||
return &rew->witness;
|
||||
}
|
||||
}
|
||||
not_reached();
|
||||
}
|
||||
|
||||
static void
|
||||
rtree_leaf_elm_witness_dalloc(tsd_t *tsd, witness_t *witness,
|
||||
const rtree_leaf_elm_t *elm) {
|
||||
size_t i;
|
||||
rtree_leaf_elm_witness_tsd_t *witnesses =
|
||||
tsd_rtree_leaf_elm_witnessesp_get(tsd);
|
||||
|
||||
for (i = 0; i < RTREE_ELM_ACQUIRE_MAX; i++) {
|
||||
rtree_leaf_elm_witness_t *rew = &witnesses->witnesses[i];
|
||||
|
||||
if (rew->elm == elm) {
|
||||
rew->elm = NULL;
|
||||
witness_init(&rew->witness, "rtree_leaf_elm",
|
||||
WITNESS_RANK_RTREE_ELM, rtree_leaf_elm_witness_comp,
|
||||
NULL);
|
||||
return;
|
||||
}
|
||||
}
|
||||
not_reached();
|
||||
}
|
||||
|
||||
void
|
||||
rtree_leaf_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree,
|
||||
uintptr_t key, const rtree_leaf_elm_t *elm) {
|
||||
witness_t *witness;
|
||||
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
|
||||
witness = rtree_leaf_elm_witness_alloc(tsdn_tsd(tsdn), key, elm);
|
||||
witness_lock(tsdn, witness);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_leaf_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
|
||||
const rtree_leaf_elm_t *elm) {
|
||||
witness_t *witness;
|
||||
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
|
||||
witness = rtree_leaf_elm_witness_find(tsdn_tsd(tsdn), elm);
|
||||
witness_assert_owner(tsdn, witness);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_leaf_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
|
||||
const rtree_leaf_elm_t *elm) {
|
||||
witness_t *witness;
|
||||
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
|
||||
witness = rtree_leaf_elm_witness_find(tsdn_tsd(tsdn), elm);
|
||||
witness_unlock(tsdn, witness);
|
||||
rtree_leaf_elm_witness_dalloc(tsdn_tsd(tsdn), witness, elm);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_ctx_data_init(rtree_ctx_t *ctx) {
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
|
||||
|
@ -77,90 +77,6 @@ TEST_BEGIN(test_rtree_read_empty) {
|
||||
}
|
||||
TEST_END
|
||||
|
||||
#define NTHREADS 8
|
||||
#define MAX_NBITS 30
|
||||
#define NITERS 1000
|
||||
#define SEED 42
|
||||
|
||||
typedef struct {
|
||||
rtree_t *rtree;
|
||||
uint32_t seed;
|
||||
} thd_start_arg_t;
|
||||
|
||||
static void *
|
||||
thd_start(void *varg) {
|
||||
thd_start_arg_t *arg = (thd_start_arg_t *)varg;
|
||||
rtree_ctx_t rtree_ctx;
|
||||
rtree_ctx_data_init(&rtree_ctx);
|
||||
sfmt_t *sfmt;
|
||||
extent_t *extent;
|
||||
tsdn_t *tsdn;
|
||||
unsigned i;
|
||||
|
||||
sfmt = init_gen_rand(arg->seed);
|
||||
extent = (extent_t *)malloc(sizeof(extent));
|
||||
assert_ptr_not_null(extent, "Unexpected malloc() failure");
|
||||
extent_init(extent, NULL, NULL, 0, false, NSIZES, 0,
|
||||
extent_state_active, false, false);
|
||||
tsdn = tsdn_fetch();
|
||||
|
||||
for (i = 0; i < NITERS; i++) {
|
||||
uintptr_t key = (uintptr_t)(gen_rand64(sfmt) & ((ZU(1) <<
|
||||
MAX_NBITS) - ZU(1)));
|
||||
if (i % 2 == 0) {
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn,
|
||||
arg->rtree, &rtree_ctx, key, false, true);
|
||||
assert_ptr_not_null(elm,
|
||||
"Unexpected rtree_leaf_elm_acquire() failure");
|
||||
rtree_leaf_elm_write(tsdn, arg->rtree, elm, true,
|
||||
extent, NSIZES, false);
|
||||
rtree_leaf_elm_release(tsdn, arg->rtree, elm);
|
||||
|
||||
elm = rtree_leaf_elm_acquire(tsdn, arg->rtree,
|
||||
&rtree_ctx, key, true, false);
|
||||
assert_ptr_not_null(elm,
|
||||
"Unexpected rtree_leaf_elm_acquire() failure");
|
||||
rtree_leaf_elm_extent_read(tsdn, arg->rtree, elm, true,
|
||||
true);
|
||||
rtree_leaf_elm_szind_read(tsdn, arg->rtree, elm, true,
|
||||
true);
|
||||
rtree_leaf_elm_slab_read(tsdn, arg->rtree, elm, true,
|
||||
true);
|
||||
rtree_leaf_elm_release(tsdn, arg->rtree, elm);
|
||||
} else {
|
||||
rtree_extent_read(tsdn, arg->rtree, &rtree_ctx, key,
|
||||
false);
|
||||
}
|
||||
}
|
||||
|
||||
free(extent);
|
||||
fini_gen_rand(sfmt);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
TEST_BEGIN(test_rtree_concurrent) {
|
||||
thd_start_arg_t arg;
|
||||
thd_t thds[NTHREADS];
|
||||
sfmt_t *sfmt;
|
||||
tsdn_t *tsdn;
|
||||
|
||||
sfmt = init_gen_rand(SEED);
|
||||
tsdn = tsdn_fetch();
|
||||
arg.rtree = &test_rtree;
|
||||
assert_false(rtree_new(arg.rtree, false),
|
||||
"Unexpected rtree_new() failure");
|
||||
arg.seed = gen_rand32(sfmt);
|
||||
for (unsigned i = 0; i < NTHREADS; i++) {
|
||||
thd_create(&thds[i], thd_start, (void *)&arg);
|
||||
}
|
||||
for (unsigned i = 0; i < NTHREADS; i++) {
|
||||
thd_join(thds[i], NULL);
|
||||
}
|
||||
rtree_delete(tsdn, arg.rtree);
|
||||
fini_gen_rand(sfmt);
|
||||
}
|
||||
TEST_END
|
||||
|
||||
#undef NTHREADS
|
||||
#undef NITERS
|
||||
#undef SEED
|
||||
@ -254,13 +170,11 @@ TEST_BEGIN(test_rtree_random) {
|
||||
|
||||
for (unsigned i = 0; i < NSET; i++) {
|
||||
keys[i] = (uintptr_t)gen_rand64(sfmt);
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn, rtree,
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
|
||||
&rtree_ctx, keys[i], false, true);
|
||||
assert_ptr_not_null(elm,
|
||||
"Unexpected rtree_leaf_elm_acquire() failure");
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, true, &extent, NSIZES,
|
||||
false);
|
||||
rtree_leaf_elm_release(tsdn, rtree, elm);
|
||||
"Unexpected rtree_leaf_elm_lookup() failure");
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false);
|
||||
assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
|
||||
keys[i], true), &extent,
|
||||
"rtree_extent_read() should return previously set value");
|
||||
@ -304,7 +218,6 @@ main(void) {
|
||||
|
||||
return test(
|
||||
test_rtree_read_empty,
|
||||
test_rtree_concurrent,
|
||||
test_rtree_extrema,
|
||||
test_rtree_bits,
|
||||
test_rtree_random);