Incorporate szind/slab into rtree leaves.

Expand and restructure the rtree API such that all common operations can
be achieved with minimal work, regardless of whether the rtree leaf
fields are independent or packed into a single atomic pointer.
Jason Evans 2017-03-16 17:57:52 -07:00
parent 944c8a3383
commit 99d68445ef
13 changed files with 471 additions and 226 deletions
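
To make the restructured read path concrete, here is a minimal caller-side sketch (not part of the commit; it assumes jemalloc's internal headers, and the function name and branch bodies are invented). It uses the rtree_szind_slab_read() signature introduced in this diff, with the same tsdn/rtree_ctx setup that arena_vsalloc() below follows:

/* Sketch: fetch szind and slab together on a deallocation fast path. */
static void
dalloc_path_sketch(tsdn_t *tsdn, void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	/* dependent=true: ptr is known to refer to a live allocation. */
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);
	if (slab) {
		/* Small allocation: free into the bin for szind. */
	} else {
		/* Large allocation: read the extent and deallocate it. */
	}
}

Whether the leaf stores the three fields separately or packs them into one word, this caller does the same minimal work.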

View File

@ -14,7 +14,8 @@ void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
size_t arena_vsalloc(tsdn_t *tsdn, const void *ptr);
void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
@ -114,12 +115,60 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return extent_arena_get(iealloc(tsdn, ptr));
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
assert(ptr != NULL);
return index2size(extent_szind_get(extent));
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != NSIZES);
if (config_debug && unlikely(extent != NULL)) {
rtree_leaf_elm_t elm;
rtree_leaf_elm_read(rtree_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true), true, &elm);
assert(extent == rtree_leaf_elm_extent_get(&elm));
assert(szind == extent_szind_get(extent));
}
return index2size(szind);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
/*
* Return 0 if ptr is not within an extent managed by jemalloc. This
* function has two extra costs relative to isalloc():
* - The rtree calls cannot claim to be dependent lookups, which induces
* rtree lookup load dependencies.
* - The lookup may fail, so there is an extra branch to check for
* failure.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
return 0;
}
if (extent == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(szind != NSIZES);
return index2size(szind);
}
JEMALLOC_ALWAYS_INLINE void
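
A hedged usage sketch of the contract arena_vsalloc() exposes (the wrapper name is invented; jemalloc internal headers assumed): it returns 0 for pointers jemalloc does not manage, so callers may probe arbitrary addresses at the cost of the non-dependent lookup described in the comment above.

size_t
probe_usable_size_sketch(tsdn_t *tsdn, const void *ptr) {
	size_t usize = arena_vsalloc(tsdn, ptr);
	if (usize == 0) {
		/* ptr is not within an extent managed by jemalloc. */
	}
	return usize;
}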

View File

@ -54,7 +54,7 @@ bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, size_t size_b, szind_t szind_b);
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);

View File

@ -2,11 +2,11 @@
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
#ifndef JEMALLOC_ENABLE_INLINE
extent_t *extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
arena_t *extent_arena_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
size_t extent_size_get(const extent_t *extent);
szind_t extent_szind_get_maybe_invalid(const extent_t *extent);
szind_t extent_szind_get(const extent_t *extent);
size_t extent_usize_get(const extent_t *extent);
void *extent_before_get(const extent_t *extent);
@ -47,15 +47,6 @@ int extent_snad_comp(const extent_t *a, const extent_t *b);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE extent_t *
extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
dependent);
}
JEMALLOC_INLINE arena_t *
extent_arena_get(const extent_t *extent) {
return extent->e_arena;
@ -81,11 +72,18 @@ extent_size_get(const extent_t *extent) {
}
JEMALLOC_INLINE szind_t
extent_szind_get(const extent_t *extent) {
assert(extent->e_szind < NSIZES); /* Never call when "invalid". */
extent_szind_get_maybe_invalid(const extent_t *extent) {
assert(extent->e_szind <= NSIZES);
return extent->e_szind;
}
JEMALLOC_INLINE szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
assert(szind < NSIZES); /* Never call when "invalid". */
return szind;
}
JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent) {
return index2size(extent_szind_get(extent));

View File

@ -535,7 +535,6 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/tsd_inlines.h"
#include "jemalloc/internal/witness_inlines.h"
#include "jemalloc/internal/mutex_inlines.h"
#include "jemalloc/internal/rtree_inlines.h"
#ifndef JEMALLOC_ENABLE_INLINE
pszind_t psz2ind(size_t psz);
@ -934,6 +933,7 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
#endif
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/rtree_inlines.h"
#include "jemalloc/internal/base_inlines.h"
#include "jemalloc/internal/bitmap_inlines.h"
/*
@ -994,7 +994,11 @@ arena_ichoose(tsd_t *tsd, arena_t *arena) {
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
return extent_lookup(tsdn, ptr, true);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
#endif
@ -1113,25 +1117,7 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
extent_t *extent;
/*
* Return 0 if ptr is not within an extent managed by jemalloc. This
* function has two extra costs relative to isalloc():
* - The extent_lookup() call cannot claim to be a dependent lookup,
* which induces rtree lookup load dependencies.
* - The lookup may fail, so there is an extra branch to check for
* failure.
* */
extent = extent_lookup(tsdn, ptr, false);
if (extent == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
return isalloc(tsdn, extent, ptr);
return arena_vsalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void

View File

@ -14,8 +14,8 @@ extern large_dalloc_junk_t *large_dalloc_junk;
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *large_dalloc_maybe_junk;
#else
void large_dalloc_junk(void *ptr, size_t usize);
void large_dalloc_maybe_junk(void *ptr, size_t usize);
void large_dalloc_junk(void *ptr, size_t size);
void large_dalloc_maybe_junk(void *ptr, size_t size);
#endif
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);

View File

@ -77,6 +77,7 @@ arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arena_vsalloc
arenas
arenas_tdata_cleanup
b0get
@ -169,7 +170,6 @@ extent_list_init
extent_list_last
extent_list_remove
extent_list_replace
extent_lookup
extent_merge_wrapper
extent_past_get
extent_prof_tctx_get
@ -192,6 +192,7 @@ extent_split_wrapper
extent_state_get
extent_state_set
extent_szind_get
extent_szind_get_maybe_invalid
extent_szind_set
extent_usize_get
extent_zeroed_get
@ -413,25 +414,33 @@ psz2ind
psz2u
rtree_clear
rtree_delete
rtree_extent_read
rtree_extent_szind_read
rtree_leaf_alloc
rtree_leaf_dalloc
rtree_leaf_elm_acquire
rtree_leaf_elm_extent_read
rtree_leaf_elm_extent_write
rtree_leaf_elm_lookup
rtree_leaf_elm_lookup_hard
rtree_leaf_elm_read
rtree_leaf_elm_read_acquired
rtree_leaf_elm_release
rtree_leaf_elm_slab_read
rtree_leaf_elm_slab_write
rtree_leaf_elm_szind_read
rtree_leaf_elm_szind_write
rtree_leaf_elm_witness_access
rtree_leaf_elm_witness_acquire
rtree_leaf_elm_witness_release
rtree_leaf_elm_write
rtree_leaf_elm_write_acquired
rtree_leafkey
rtree_new
rtree_node_alloc
rtree_node_dalloc
rtree_read
rtree_subkey
rtree_szind_read
rtree_szind_slab_read
rtree_szind_slab_update
rtree_write
s2u
s2u_compute

View File

@ -4,21 +4,40 @@
#ifndef JEMALLOC_ENABLE_INLINE
uintptr_t rtree_leafkey(uintptr_t key);
uintptr_t rtree_subkey(uintptr_t key, unsigned level);
extent_t *rtree_leaf_elm_read(rtree_leaf_elm_t *elm, bool dependent);
void rtree_leaf_elm_write(rtree_leaf_elm_t *elm, const extent_t *extent);
extent_t *rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, bool dependent);
szind_t rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, bool dependent);
bool rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, bool dependent);
void rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, extent_t *extent);
void rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, szind_t szind);
void rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool acquired, bool slab);
void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, extent_t *extent, szind_t szind, bool slab);
rtree_leaf_elm_t *rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, const extent_t *extent);
extent_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent);
uintptr_t key, extent_t *extent, szind_t szind, bool slab);
rtree_leaf_elm_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
extent_t *rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
szind_t rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
bool rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, extent_t **r_extent,
szind_t *r_szind);
bool rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab);
void rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t szind, bool slab);
rtree_leaf_elm_t *rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
extent_t *rtree_leaf_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree,
rtree_leaf_elm_t *elm);
void rtree_leaf_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree,
rtree_leaf_elm_t *elm, const extent_t *extent);
void rtree_leaf_elm_release(tsdn_t *tsdn, const rtree_t *rtree,
void rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm);
void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key);
@ -45,38 +64,104 @@ rtree_subkey(uintptr_t key, unsigned level) {
return ((key >> shiftbits) & mask);
}
/*
* Atomic getters.
*
* dependent: Reading a value on behalf of a pointer to a valid allocation
* is guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_read(rtree_leaf_elm_t *elm, bool dependent) {
extent_t *extent;
if (dependent) {
/*
* Reading a value on behalf of a pointer to a valid allocation
* is guaranteed to be a clean read even without
* synchronization, because the rtree update became visible in
* memory before the pointer came into existence.
*/
extent = (extent_t *)atomic_load_p(&elm->extent,
ATOMIC_RELAXED);
} else {
/*
* An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
extent = (extent_t *)atomic_load_p(&elm->extent,
ATOMIC_ACQUIRE);
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, bool dependent) {
if (config_debug && acquired) {
assert(dependent);
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
assert(!acquired || ((uintptr_t)extent & (uintptr_t)0x1) ==
(uintptr_t)0x1);
/* Mask lock bit. */
extent = (extent_t *)((uintptr_t)extent & ~((uintptr_t)0x1));
return extent;
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, bool dependent) {
if (config_debug && acquired) {
assert(dependent);
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
: ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, bool dependent) {
if (config_debug && acquired) {
assert(dependent);
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
ATOMIC_ACQUIRE);
}
JEMALLOC_INLINE void
rtree_leaf_elm_write(rtree_leaf_elm_t *elm, const extent_t *extent) {
atomic_store_p(&elm->extent, (void *)extent, ATOMIC_RELEASE);
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, extent_t *extent) {
if (config_debug && acquired) {
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
if (acquired) {
/* Overlay lock bit. */
extent = (extent_t *)((uintptr_t)extent | (uintptr_t)0x1);
}
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
}
JEMALLOC_INLINE void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, szind_t szind) {
if (config_debug && acquired) {
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
assert(szind <= NSIZES);
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
}
JEMALLOC_INLINE void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, bool slab) {
if (config_debug && acquired) {
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
}
JEMALLOC_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool acquired, extent_t *extent, szind_t szind, bool slab) {
rtree_leaf_elm_slab_write(tsdn, rtree, elm, acquired, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, acquired, szind);
/*
* Write extent last, since the element is atomically considered valid
* as soon as the extent field is non-NULL.
*/
rtree_leaf_elm_extent_write(tsdn, rtree, elm, acquired, extent);
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
@ -124,34 +209,99 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
JEMALLOC_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
const extent_t *extent) {
rtree_leaf_elm_t *elm;
extent_t *extent, szind_t szind, bool slab) {
/* Use rtree_clear() to set the extent to NULL. */
assert(extent != NULL);
assert(extent != NULL); /* Use rtree_clear() for this case. */
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
if (elm == NULL) {
return true;
}
assert(rtree_leaf_elm_read(elm, false) == NULL);
rtree_leaf_elm_write(elm, extent);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false, false) ==
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, false, extent, szind, slab);
return false;
}
JEMALLOC_ALWAYS_INLINE extent_t *
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
bool dependent) {
rtree_leaf_elm_t *elm;
elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent,
false);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, dependent, false);
if (!dependent && elm == NULL) {
return NULL;
}
assert(elm != NULL);
return elm;
}
return rtree_leaf_elm_read(elm, dependent);
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, false, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NSIZES;
}
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, false, dependent);
}
/*
* rtree_slab_read() is intentionally omitted because slab is always read in
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, false,
dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, false,
dependent);
return false;
}
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, false,
dependent);
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, false, dependent);
return false;
}
JEMALLOC_INLINE void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
rtree_leaf_elm_slab_write(tsdn, rtree, elm, false, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, false, szind);
}
JEMALLOC_INLINE rtree_leaf_elm_t *
@ -162,18 +312,19 @@ rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (!dependent && elm == NULL) {
return NULL;
}
assert(elm != NULL);
spin_t spinner = SPIN_INITIALIZER;
while (true) {
/* The least significant bit serves as a lock. */
void *extent_and_lock = atomic_load_p(&elm->extent,
void *extent_and_lock = atomic_load_p(&elm->le_extent,
ATOMIC_RELAXED);
if (likely(((uintptr_t)extent_and_lock & (uintptr_t)0x1) == 0))
{
void *locked = (void *)((uintptr_t)extent_and_lock
| (uintptr_t)0x1);
if (likely(atomic_compare_exchange_strong_p(
&elm->extent, &extent_and_lock, locked,
&elm->le_extent, &extent_and_lock, locked,
ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
break;
}
@ -188,42 +339,11 @@ rtree_leaf_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
return elm;
}
JEMALLOC_INLINE extent_t *
rtree_leaf_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree,
rtree_leaf_elm_t *elm) {
extent_t *extent;
void *ptr = atomic_load_p(&elm->extent, ATOMIC_RELAXED);
assert(((uintptr_t)ptr & (uintptr_t)0x1) == (uintptr_t)0x1);
extent = (extent_t *)((uintptr_t)ptr & ~((uintptr_t)0x1));
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
if (config_debug) {
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
return extent;
}
JEMALLOC_INLINE void
rtree_leaf_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree,
rtree_leaf_elm_t *elm, const extent_t *extent) {
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
assert(((uintptr_t)atomic_load_p(&elm->extent, ATOMIC_RELAXED)
& (uintptr_t)0x1) == (uintptr_t)0x1);
if (config_debug) {
rtree_leaf_elm_witness_access(tsdn, rtree, elm);
}
atomic_store_p(&elm->extent, (void *)((uintptr_t)extent |
(uintptr_t)0x1), ATOMIC_RELEASE);
assert(rtree_leaf_elm_read_acquired(tsdn, rtree, elm) == extent);
}
JEMALLOC_INLINE void
rtree_leaf_elm_release(tsdn_t *tsdn, const rtree_t *rtree,
rtree_leaf_elm_t *elm) {
rtree_leaf_elm_write(elm, rtree_leaf_elm_read_acquired(tsdn, rtree,
elm));
rtree_leaf_elm_release(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm) {
extent_t *extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, true,
true);
rtree_leaf_elm_extent_write(tsdn, rtree, elm, false, extent);
if (config_debug) {
rtree_leaf_elm_witness_release(tsdn, rtree, elm);
}
@ -232,10 +352,9 @@ rtree_leaf_elm_release(tsdn_t *tsdn, const rtree_t *rtree,
JEMALLOC_INLINE void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm;
elm = rtree_leaf_elm_acquire(tsdn, rtree, rtree_ctx, key, true, false);
rtree_leaf_elm_write_acquired(tsdn, rtree, elm, NULL);
rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn, rtree, rtree_ctx,
key, true, false);
rtree_leaf_elm_write(tsdn, rtree, elm, true, NULL, NSIZES, false);
rtree_leaf_elm_release(tsdn, rtree, elm);
}
#endif
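
The dependent/!dependent split documented above is the standard relaxed-vs-acquire pairing. The following self-contained C11 sketch (types and names invented, not jemalloc's atomics layer) shows the same idea in isolation:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	_Atomic(void *) le_extent;	/* stand-in for the leaf field */
} leaf_sketch_t;

static void *
leaf_read_sketch(leaf_sketch_t *elm, bool dependent) {
	/*
	 * A reader whose key came from a live pointer already has a data
	 * dependency on the writer, so a relaxed load suffices; an
	 * arbitrary probe must acquire to pair with the writer's release.
	 */
	return atomic_load_explicit(&elm->le_extent,
	    dependent ? memory_order_relaxed : memory_order_acquire);
}

static void
leaf_write_sketch(leaf_sketch_t *elm, void *extent) {
	atomic_store_explicit(&elm->le_extent, extent, memory_order_release);
}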

View File

@ -2,11 +2,13 @@
#define JEMALLOC_INTERNAL_RTREE_STRUCTS_H
struct rtree_node_elm_s {
atomic_p_t child;
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
struct rtree_leaf_elm_s {
atomic_p_t extent;
atomic_p_t le_extent; /* (extent_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
};
struct rtree_leaf_elm_witness_s {

View File

@ -1468,7 +1468,12 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
assert(usize <= SMALL_MAXCLASS);
extent_szind_set(extent, size2index(usize));
szind_t szind = size2index(usize);
extent_szind_set(extent, szind);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
szind, false);
prof_accum_cancel(tsdn, &arena->prof_accum, usize);
@ -1481,6 +1486,10 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
assert(ptr != NULL);
extent_szind_set(extent, NBINS);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
NBINS, false);
assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
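Promotion and demotion now update the extent and its rtree leaf together. A hedged sketch (function name invented; jemalloc internal headers assumed) of the invariant this maintains, mirroring the debug block in arena_salloc():

static void
assert_szind_synced_sketch(tsdn_t *tsdn, const void *ptr, extent_t *extent) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	/* The cached leaf value must track the extent's own szind. */
	assert(szind == extent_szind_get(extent));
}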

View File

@ -476,11 +476,12 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, const extent_t *extent) {
rtree_leaf_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, true, extent, szind,
slab);
if (elm_b != NULL) {
rtree_leaf_elm_write_acquired(tsdn, &extents_rtree, elm_b,
extent);
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, true, extent,
szind, slab);
}
}
@ -494,16 +495,15 @@ extent_rtree_release(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
}
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent) {
size_t i;
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
szind_t szind) {
assert(extent_slab_get(extent));
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
/* Register interior. */
for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE), extent);
LG_PAGE), extent, szind, true);
}
}
@ -542,7 +542,7 @@ extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
}
static bool
extent_register_impl(tsdn_t *tsdn, const extent_t *extent, bool gdump_add) {
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
@ -551,9 +551,11 @@ extent_register_impl(tsdn_t *tsdn, const extent_t *extent, bool gdump_add) {
&elm_b)) {
return true;
}
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
if (extent_slab_get(extent)) {
extent_interior_register(tsdn, rtree_ctx, extent);
szind_t szind = extent_szind_get_maybe_invalid(extent);
bool slab = extent_slab_get(extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
if (slab) {
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
extent_rtree_release(tsdn, elm_a, elm_b);
@ -565,24 +567,24 @@ extent_register_impl(tsdn_t *tsdn, const extent_t *extent, bool gdump_add) {
}
static bool
extent_register(tsdn_t *tsdn, const extent_t *extent) {
extent_register(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, true);
}
static bool
extent_register_no_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, false);
}
static void
extent_reregister(tsdn_t *tsdn, const extent_t *extent) {
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
bool err = extent_register(tsdn, extent);
assert(!err);
}
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent) {
extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
@ -602,7 +604,7 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_rtree_acquire(tsdn, rtree_ctx, extent, true, false, &elm_a,
&elm_b);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
@ -653,13 +655,12 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent;
if (new_addr != NULL) {
rtree_leaf_elm_t *elm;
elm = rtree_leaf_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)new_addr, false, false);
rtree_leaf_elm_t *elm = rtree_leaf_elm_acquire(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)new_addr, false,
false);
if (elm != NULL) {
extent = rtree_leaf_elm_read_acquired(tsdn,
&extents_rtree, elm);
extent = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, elm, true, true);
if (extent != NULL) {
assert(extent_base_get(extent) == new_addr);
if (extent_arena_get(extent) != arena ||
@ -715,7 +716,8 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_wrapper(tsdn, arena, r_extent_hooks,
lead, leadsize, NSIZES, esize + trailsize, szind);
lead, leadsize, NSIZES, false, esize + trailsize, szind,
slab);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks, extents,
@ -728,7 +730,8 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
r_extent_hooks, extent, esize, szind, trailsize, NSIZES);
r_extent_hooks, extent, esize, szind, slab, trailsize,
NSIZES, false);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks, extents,
@ -742,6 +745,16 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
* splitting occurred.
*/
extent_szind_set(extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, slab);
if (slab && extent_size_get(extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
return extent;
@ -788,7 +801,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
assert(extent_state_get(extent) == extent_state_active);
if (slab) {
extent_slab_set(extent, slab);
extent_interior_register(tsdn, rtree_ctx, extent);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero) {
@ -934,7 +947,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_wrapper(tsdn, arena, r_extent_hooks, lead,
leadsize, NSIZES, esize + trailsize, szind);
leadsize, NSIZES, false, esize + trailsize, szind, slab);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks, false, lead);
@ -947,7 +960,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
r_extent_hooks, extent, esize, szind, trailsize, NSIZES);
r_extent_hooks, extent, esize, szind, slab, trailsize,
NSIZES, false);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks,
@ -961,7 +975,21 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
extent_szind_set(extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, slab);
if (slab && extent_size_get(extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
if (*commit && !extent_committed_get(extent)) {
@ -987,7 +1015,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
&rtree_ctx_fallback);
extent_slab_set(extent, true);
extent_interior_register(tsdn, rtree_ctx, extent);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero && !extent_zeroed_get(extent)) {
void *addr = extent_base_get(extent);
@ -1162,8 +1190,8 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
&extents_rtree, rtree_ctx,
(uintptr_t)extent_past_get(extent), false, false);
if (next_elm != NULL) {
extent_t *next = rtree_leaf_elm_read_acquired(tsdn,
&extents_rtree, next_elm);
extent_t *next = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, next_elm, true, true);
/*
* extents->mtx only protects against races for
* like-state extents, so call extent_can_coalesce()
@ -1188,8 +1216,8 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
&extents_rtree, rtree_ctx,
(uintptr_t)extent_before_get(extent), false, false);
if (prev_elm != NULL) {
extent_t *prev = rtree_leaf_elm_read_acquired(tsdn,
&extents_rtree, prev_elm);
extent_t *prev = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, prev_elm, true, true);
bool can_coalesce = (prev != NULL &&
extent_can_coalesce(arena, extents, extent, prev));
rtree_leaf_elm_release(tsdn, &extents_rtree, prev_elm);
@ -1231,7 +1259,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_slab_set(extent, false);
}
assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), true) == extent);
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
@ -1467,7 +1496,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, size_t size_b, szind_t szind_b) {
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
assert(extent_size_get(extent) == size_a + size_b);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
@ -1491,7 +1520,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_t lead;
extent_init(&lead, arena, extent_addr_get(extent), size_a,
extent_slab_get(extent), szind_a, extent_sn_get(extent),
slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent));
@ -1502,9 +1531,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, extent_slab_get(extent), szind_b,
extent_sn_get(extent), extent_state_get(extent),
extent_zeroed_get(extent), extent_committed_get(extent));
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent));
if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
&trail_elm_a, &trail_elm_b)) {
goto label_error_c;
@ -1519,8 +1548,10 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_size_set(extent, size_a);
extent_szind_set(extent, szind_a);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
szind_a, slab_a);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
szind_b, slab_b);
extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
@ -1599,13 +1630,13 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
&b_elm_b);
if (a_elm_b != NULL) {
rtree_leaf_elm_write_acquired(tsdn, &extents_rtree, a_elm_b,
NULL);
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, true, NULL,
NSIZES, false);
rtree_leaf_elm_release(tsdn, &extents_rtree, a_elm_b);
}
if (b_elm_b != NULL) {
rtree_leaf_elm_write_acquired(tsdn, &extents_rtree, b_elm_a,
NULL);
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, true, NULL,
NSIZES, false);
rtree_leaf_elm_release(tsdn, &extents_rtree, b_elm_a);
} else {
b_elm_b = b_elm_a;
@ -1617,7 +1648,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
extent_rtree_release(tsdn, a_elm_a, b_elm_b);
extent_dalloc(tsdn, extent_arena_get(b), b);
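
To summarize the registration scheme used throughout this file: every extent writes its first and last page keys (the two acquired boundary elements), and slabs additionally register interior pages so interior pointers resolve. A small self-contained sketch of the resulting key count (page size assumed; names invented):

#include <stdbool.h>
#include <stddef.h>

#define SK_LG_PAGE 12	/* assumes 4 KiB pages */

static size_t
extent_rtree_keys_sketch(size_t size, bool slab) {
	size_t npages = size >> SK_LG_PAGE;
	if (npages <= 1) {
		return 1;	/* first and last page coincide */
	}
	return slab ? npages : 2;	/* boundaries only, unless a slab */
}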

View File

@ -66,8 +66,8 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
#endif
void
large_dalloc_junk(void *ptr, size_t usize) {
memset(ptr, JEMALLOC_FREE_JUNK, usize);
large_dalloc_junk(void *ptr, size_t size) {
memset(ptr, JEMALLOC_FREE_JUNK, size);
}
#ifdef JEMALLOC_JET
#undef large_dalloc_junk
@ -80,14 +80,14 @@ large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
#define large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk)
#endif
void
large_dalloc_maybe_junk(void *ptr, size_t usize) {
large_dalloc_maybe_junk(void *ptr, size_t size) {
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the extent isn't about to be
* unmapped.
*/
if (!config_munmap || (have_dss && extent_in_dss(ptr))) {
large_dalloc_junk(ptr, usize);
large_dalloc_junk(ptr, size);
}
}
}
@ -115,7 +115,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + large_pad, size2index(usize),
diff, NSIZES);
false, diff, NSIZES, false);
if (trail == NULL) {
return true;
}
@ -182,7 +182,12 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
return true;
}
extent_szind_set(extent, size2index(usize));
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = size2index(usize);
extent_szind_set(extent, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, false);
if (config_stats && new_mapping) {
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);

View File

@ -57,9 +57,16 @@ get_large_size(size_t ind) {
/* Like ivsalloc(), but safe to call on discarded allocations. */
static size_t
vsalloc(tsdn_t *tsdn, const void *ptr) {
extent_t *extent;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
return 0;
}
extent = extent_lookup(tsdn, ptr, false);
if (extent == NULL) {
return 0;
}
@ -67,7 +74,11 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
return 0;
}
return isalloc(tsdn, extent, ptr);
if (szind == NSIZES) {
return 0;
}
return index2size(szind);
}
static unsigned

View File

@ -70,8 +70,8 @@ TEST_BEGIN(test_rtree_read_empty) {
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
test_rtree = &rtree;
assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, false),
"rtree_read() should return NULL for empty tree");
assert_ptr_null(rtree_extent_read(tsdn, &rtree, &rtree_ctx, PAGE,
false), "rtree_extent_read() should return NULL for empty tree");
rtree_delete(tsdn, &rtree);
test_rtree = NULL;
}
@ -99,6 +99,8 @@ thd_start(void *varg) {
sfmt = init_gen_rand(arg->seed);
extent = (extent_t *)malloc(sizeof(extent));
assert_ptr_not_null(extent, "Unexpected malloc() failure");
extent_init(extent, NULL, NULL, 0, false, NSIZES, 0,
extent_state_active, false, false);
tsdn = tsdn_fetch();
for (i = 0; i < NITERS; i++) {
@ -109,18 +111,24 @@ thd_start(void *varg) {
&arg->rtree, &rtree_ctx, key, false, true);
assert_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_acquire() failure");
rtree_leaf_elm_write_acquired(tsdn, &arg->rtree, elm,
extent);
rtree_leaf_elm_write(tsdn, &arg->rtree, elm, true,
extent, NSIZES, false);
rtree_leaf_elm_release(tsdn, &arg->rtree, elm);
elm = rtree_leaf_elm_acquire(tsdn, &arg->rtree,
&rtree_ctx, key, true, false);
assert_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_acquire() failure");
rtree_leaf_elm_read_acquired(tsdn, &arg->rtree, elm);
rtree_leaf_elm_extent_read(tsdn, &arg->rtree, elm, true,
true);
rtree_leaf_elm_szind_read(tsdn, &arg->rtree, elm, true,
true);
rtree_leaf_elm_slab_read(tsdn, &arg->rtree, elm, true,
true);
rtree_leaf_elm_release(tsdn, &arg->rtree, elm);
} else {
rtree_read(tsdn, &arg->rtree, &rtree_ctx, key, false);
rtree_extent_read(tsdn, &arg->rtree, &rtree_ctx, key,
false);
}
}
@ -158,26 +166,33 @@ TEST_END
TEST_BEGIN(test_rtree_extrema) {
extent_t extent_a, extent_b;
tsdn_t *tsdn;
extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
size2index(LARGE_MINCLASS), 0, extent_state_active, false, false);
extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
extent_state_active, false, false);
tsdn = tsdn_fetch();
tsdn_t *tsdn = tsdn_fetch();
rtree_t rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
test_rtree = &rtree;
assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, PAGE, &extent_a),
assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, PAGE, &extent_a,
extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
"Unexpected rtree_write() failure");
assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, PAGE, true),
rtree_szind_slab_update(tsdn, &rtree, &rtree_ctx, PAGE,
extent_szind_get(&extent_a), extent_slab_get(&extent_a));
assert_ptr_eq(rtree_extent_read(tsdn, &rtree, &rtree_ctx, PAGE, true),
&extent_a,
"rtree_read() should return previously set value");
"rtree_extent_read() should return previously set value");
assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0),
&extent_b), "Unexpected rtree_write() failure");
assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, ~((uintptr_t)0),
true), &extent_b,
"rtree_read() should return previously set value");
&extent_b, extent_szind_get_maybe_invalid(&extent_b),
extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
assert_ptr_eq(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
~((uintptr_t)0), true), &extent_b,
"rtree_extent_read() should return previously set value");
rtree_delete(tsdn, &rtree);
test_rtree = NULL;
@ -191,6 +206,9 @@ TEST_BEGIN(test_rtree_bits) {
PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
extent_t extent;
extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
extent_state_active, false, false);
rtree_t rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
@ -200,16 +218,17 @@ TEST_BEGIN(test_rtree_bits) {
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, keys[i],
&extent), "Unexpected rtree_write() failure");
&extent, NSIZES, false),
"Unexpected rtree_write() failure");
for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx,
keys[j], true), &extent,
"rtree_read() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, set key=%#"FMTxPTR", get "
assert_ptr_eq(rtree_extent_read(tsdn, &rtree,
&rtree_ctx, keys[j], true),
&extent, "rtree_extent_read() should return "
"previously set value and ignore insignificant key "
"bits; i=%u, j=%u, set key=%#"FMTxPTR", get "
"key=%#"FMTxPTR, i, j, keys[i], keys[j]);
}
assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
assert_ptr_null(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
(((uintptr_t)2) << LG_PAGE), false),
"Only leftmost rtree leaf should be set; i=%u", i);
rtree_clear(tsdn, &rtree, &rtree_ctx, keys[i]);
@ -226,10 +245,13 @@ TEST_BEGIN(test_rtree_random) {
sfmt_t *sfmt = init_gen_rand(SEED);
tsdn_t *tsdn = tsdn_fetch();
uintptr_t keys[NSET];
extent_t extent;
rtree_t rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
extent_t extent;
extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
extent_state_active, false, false);
test_rtree = &rtree;
assert_false(rtree_new(&rtree), "Unexpected rtree_new() failure");
@ -239,26 +261,30 @@ TEST_BEGIN(test_rtree_random) {
&rtree_ctx, keys[i], false, true);
assert_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_acquire() failure");
rtree_leaf_elm_write_acquired(tsdn, &rtree, elm, &extent);
rtree_leaf_elm_write(tsdn, &rtree, elm, true, &extent, NSIZES,
false);
rtree_leaf_elm_release(tsdn, &rtree, elm);
assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i],
true), &extent,
"rtree_read() should return previously set value");
assert_ptr_eq(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
keys[i], true), &extent,
"rtree_extent_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i],
true), &extent,
"rtree_read() should return previously set value, i=%u", i);
assert_ptr_eq(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
keys[i], true), &extent,
"rtree_extent_read() should return previously set value, "
"i=%u", i);
}
for (unsigned i = 0; i < NSET; i++) {
rtree_clear(tsdn, &rtree, &rtree_ctx, keys[i]);
assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i],
true), "rtree_read() should return previously set value");
assert_ptr_null(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
keys[i], true),
"rtree_extent_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, keys[i],
true), "rtree_read() should return previously set value");
assert_ptr_null(rtree_extent_read(tsdn, &rtree, &rtree_ctx,
keys[i], true),
"rtree_extent_read() should return previously set value");
}
rtree_delete(tsdn, &rtree);