#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/tsd.h"

/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with extents that are currently owned by jemalloc.
 *
 *******************************************************************************
 */

/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
#  define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
#  define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
#  define RTREE_HEIGHT 3
#else
#  error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
#  define RTREE_LEAF_COMPACT
#endif
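
/*
 * Worked example (illustrative, not a definition): on a typical 64-bit
 * Linux/x86_64 build with LG_SIZEOF_PTR == 3, LG_VADDR == 48 and
 * LG_PAGE == 12, the values above come out to
 *
 *   RTREE_NHIB   = (1 << 6) - 48 = 16   (high bits ignored)
 *   RTREE_NLIB   = 12                   (low, intra-page bits ignored)
 *   RTREE_NSB    = 48 - 12 = 36         (bits the tree must distinguish)
 *   RTREE_HEIGHT = 2                    (36 <= 36)
 *
 * and RTREE_LEAF_COMPACT is defined as long as LG_CEIL(SC_NSIZES) fits in
 * those 16 spare high bits.  Other configurations change the numbers, not
 * the structure.
 */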

typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
};

typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
	szind_t szind;
	extent_state_t state; /* Mirrors edata->state. */
	bool is_head; /* Mirrors edata->is_head. */
	bool slab;
};

typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
	edata_t *edata;
	rtree_metadata_t metadata;
};

#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
	/*
	 * Single pointer-width field containing all five leaf element fields.
	 * For example, on a 64-bit x64 system with 48 significant virtual
	 * memory address bits, the index, edata, state, is_head, and slab
	 * fields are packed as follows:
	 *
	 * x: index
	 * e: edata
	 * s: state
	 * h: is_head
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
	 */
	atomic_p_t	le_bits;
#else
	atomic_p_t	le_edata; /* (edata_t *) */
	/*
	 * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
	 */
	atomic_u_t	le_metadata;
#endif
};
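
/*
 * Worked example of the compact layout above (illustrative numbers only,
 * assuming LG_VADDR == 48 and an EDATA_ALIGNMENT-aligned edata pointer):
 * packing szind == 5, edata == 0x7f0123456780, state == 0, is_head == 1,
 * slab == 1 yields
 *
 *   (5 << 48) | 0x7f0123456780 | (0 << 2) | (1 << 1) | 1
 *     == 0x00057f0123456783
 *
 * and rtree_leaf_elm_bits_decode() below recovers exactly those fields.
 */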

typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
	/* Number of key bits distinguished by this level. */
	unsigned bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned cumbits;
};

typedef struct rtree_s rtree_t;
struct rtree_s {
	base_t *base;
	malloc_mutex_t init_lock;
	/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
	rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
	rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};

/*
 * Split the bits into one to three partitions depending on the number of
 * significant bits.  If the number of bits does not divide evenly into the
 * number of levels, place one remainder bit per level starting at the leaf
 * level.
 */
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
	{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
	{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
	{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
	{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
	{RTREE_NSB/3 + RTREE_NSB%3/2,
	    RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
	{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
#  error Unsupported rtree height
#endif
};
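
/*
 * Worked example (illustrative): with RTREE_NHIB == 16, RTREE_NSB == 36 and
 * RTREE_HEIGHT == 2, this expands to {18, 34} and {18, 52}: the root array
 * holds 1 << 18 node pointers, each fully-initialized leaf holds 1 << 18
 * rtree_leaf_elm_t entries, and one leaf therefore spans
 * 2^18 pages * 4 KiB/page == 1 GiB of virtual address space.
 */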

bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);

rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	return ptrbits - cumbits;
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
	return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
	return (size_t)((key >> rtree_leaf_maskbits()) &
	    (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = rtree_levels[level].cumbits;
	unsigned shiftbits = ptrbits - cumbits;
	unsigned maskbits = rtree_levels[level].bits;
	uintptr_t mask = (ZU(1) << maskbits) - 1;
	return ((key >> shiftbits) & mask);
}
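
/*
 * Worked example (illustrative, using the RTREE_HEIGHT == 2 numbers above):
 * rtree_leaf_maskbits() == 64 - (52 - 18) == 30, so for
 * key == 0x7f0123456780
 *
 *   rtree_leafkey(key)          == key & ~(uintptr_t)0x3fffffff
 *                                  (identifies the 1 GiB leaf region)
 *   rtree_cache_direct_map(key) == (key >> 30) & (RTREE_CTX_NCACHE - 1)
 *   rtree_subkey(key, 0)        == (key >> 30) & 0x3ffff (root slot)
 *   rtree_subkey(key, 1)        == (key >> 12) & 0x3ffff (page index within
 *                                  the leaf)
 */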

/*
 * Atomic getters.
 *
 * dependent: Reading a value on behalf of a pointer to a valid allocation
 *            is guaranteed to be a clean read even without synchronization,
 *            because the rtree update became visible in memory before the
 *            pointer came into existence.
 * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
 *             dependent on a previous rtree write, which means a stale read
 *             could result if synchronization were omitted here.
 */
# ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	uintptr_t edata_bits = (uintptr_t)contents.edata
	    & (((uintptr_t)1 << LG_VADDR) - 1);

	uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
	uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
	uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
	uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
	    RTREE_LEAF_STATE_SHIFT;
	uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
	    slab_bits;
	assert((edata_bits & metadata_bits) == 0);

	return edata_bits | metadata_bits;
}

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_bits_decode(uintptr_t bits) {
	rtree_contents_t contents;
	/* Do the easy things first. */
	contents.metadata.szind = bits >> LG_VADDR;
	contents.metadata.slab = (bool)(bits & 1);
	contents.metadata.is_head = (bool)(bits & (1 << 1));

	uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;

	uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
#  ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
	 * the higher ones.  Instead, the high bits get zeroed.
	 */
	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
	/* Mask off metadata. */
	uintptr_t mask = high_bit_mask & low_bit_mask;
	/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
	contents.edata = (edata_t *)(bits & mask);
#  else
	/* Restore sign-extended high bits, mask metadata bits. */
	/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
	contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
	    >> RTREE_NHIB) & low_bit_mask);
#  endif
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	return contents;
}
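
/*
 * Sanity sketch (illustrative, not part of the API): for any contents whose
 * edata pointer is a canonical, EDATA_ALIGNMENT-aligned address and whose
 * metadata fields are in range, decode is the inverse of encode:
 *
 *   rtree_contents_t c = ...;
 *   uintptr_t bits = rtree_leaf_elm_bits_encode(c);
 *   rtree_contents_t d = rtree_leaf_elm_bits_decode(bits);
 *   assert(d.edata == c.edata && d.metadata.szind == c.metadata.szind
 *       && d.metadata.slab == c.metadata.slab
 *       && d.metadata.is_head == c.metadata.is_head
 *       && d.metadata.state == c.metadata.state);
 */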

# endif /* RTREE_LEAF_COMPACT */

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
	return contents;
#else
	rtree_contents_t contents;
	unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
	contents.metadata.slab = (bool)(metadata_bits & 1);
	contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));

	uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;
	contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH);

	contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);

	return contents;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
    unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
	/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
	*bits = (void *)rtree_leaf_elm_bits_encode(contents);
	/* Suppress spurious warning from static analysis */
	if (config_debug) {
		*additional = 0;
	}
#else
	*additional = (unsigned)contents.metadata.slab
	    | ((unsigned)contents.metadata.is_head << 1)
	    | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
	    | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH));
	*bits = contents.edata;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
	atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
	/*
	 * Write edata last, since the element is atomically considered valid
	 * as soon as the edata field is non-NULL.
	 */
	atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
	void *bits;
	unsigned additional;
	rtree_contents_encode(contents, &bits, &additional);
	rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}
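
/*
 * Usage sketch (illustrative only; callers normally go through the emap
 * layer): to publish a mapping for one page-aligned key, fill an
 * rtree_contents_t and hand it to rtree_leaf_elm_write():
 *
 *   rtree_contents_t contents;
 *   contents.edata = edata;
 *   contents.metadata.szind = szind;
 *   contents.metadata.slab = slab;
 *   contents.metadata.is_head = ...;   (mirrors edata->is_head)
 *   contents.metadata.state = ...;     (mirrors edata->state)
 *   rtree_leaf_elm_write(tsdn, rtree, elm, contents);
 */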

/* The state field can be updated independently (and more frequently). */
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
	assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
	    /* dependent */ true);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
	atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
		atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
	}
#else
	unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
	}
#endif
}

/*
 * Tries to look up the key in the L1 cache, returning false if there's a hit,
 * or true if there's a miss.
 * Key is allowed to be 0; returns true in this case.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_leaf_elm_t **elm) {
	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
		return true;
	}

	rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
	assert(leaf != NULL);
	uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
	*elm = &leaf[subkey];

	return false;
}
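
/*
 * Call pattern sketch (mirrors rtree_metadata_try_read_fast() below): check
 * the boolean result rather than testing *elm against NULL, so the hit path
 * does not pay for an extra branch:
 *
 *   rtree_leaf_elm_t *elm;
 *   if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
 *           ... fall back to the slower lookup ...
 *   }
 *   ... use elm ...
 */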

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	/* Fast path: L1 direct mapped cache. */
	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
		return &leaf[subkey];
	}
	/*
	 * Search the L2 LRU cache.  On hit, swap the matching element into the
	 * slot in L1 cache, and move the position in L2 up by 1.
	 */
#define RTREE_CACHE_CHECK_L2(i) do {					\
	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {	\
		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;	\
		assert(leaf != NULL);					\
		if (i > 0) {						\
			/* Bubble up by one. */				\
			rtree_ctx->l2_cache[i].leafkey =		\
			    rtree_ctx->l2_cache[i - 1].leafkey;		\
			rtree_ctx->l2_cache[i].leaf =			\
			    rtree_ctx->l2_cache[i - 1].leaf;		\
			rtree_ctx->l2_cache[i - 1].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[i - 1].leaf =		\
			    rtree_ctx->cache[slot].leaf;		\
		} else {						\
			rtree_ctx->l2_cache[0].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[0].leaf =			\
			    rtree_ctx->cache[slot].leaf;		\
		}							\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);	\
		return &leaf[subkey];					\
	}								\
} while (0)
	/* Check the first cache entry. */
	RTREE_CACHE_CHECK_L2(0);
	/* Search the remaining cache elements. */
	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
		RTREE_CACHE_CHECK_L2(i);
	}
#undef RTREE_CACHE_CHECK_L2

	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
}

/*
 * Returns true on lookup failure.
 */
static inline bool
rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_contents_t *r_contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ false);
	if (elm == NULL) {
		return true;
	}
	*r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ false);
	return false;
}

static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}

static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
}

/*
 * Returns true when the request cannot be fulfilled by the fast path.
 */
static inline bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
	rtree_leaf_elm_t *elm;
	/*
	 * Check the bool return value (lookup success or not) instead of
	 * elm == NULL, which would cost an extra branch: when the cache
	 * lookup succeeds, the returned pointer is never NULL, but the
	 * compiler cannot know that.
	 */
	if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
		return true;
	}
	assert(elm != NULL);
	*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
	return false;
}
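
/*
 * Typical use (sketch; callers normally go through the emap wrappers): fetch
 * the szind and slab bits for a pointer owned by jemalloc on a hot path,
 * falling back to rtree_metadata_read() when the leaf is not already cached
 * in rtree_ctx:
 *
 *   rtree_metadata_t meta;
 *   if (rtree_metadata_try_read_fast(tsdn, rtree, rtree_ctx,
 *       (uintptr_t)ptr, &meta)) {
 *           meta = rtree_metadata_read(tsdn, rtree, rtree_ctx,
 *               (uintptr_t)ptr);
 *   }
 */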

JEMALLOC_ALWAYS_INLINE void
rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
	assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
	/*
	 * Only used for emap_(de)register_interior, which implies the
	 * boundaries have been registered already.  Therefore all the lookups
	 * are dependent w/o init_missing, assuming the range spans across at
	 * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
	 */
	void *bits;
	unsigned additional;
	rtree_contents_encode(contents, &bits, &additional);

	rtree_leaf_elm_t *elm = NULL; /* Dead store. */
	for (uintptr_t addr = base; addr <= end; addr += PAGE) {
		if (addr == base ||
		    (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
			elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
			    /* dependent */ true, /* init_missing */ false);
			assert(elm != NULL);
		}
		assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
		    /* dependent */ true, /* init_missing */ false));
		assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
		    /* dependent */ true).edata != NULL);
		rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
		elm++;
	}
}
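
/*
 * Reading the loop above: (end - base) / PAGE + 1 elements are written, and
 * the full lookup is repeated only for the first page and whenever addr
 * crosses a leaf boundary (its low rtree_leaf_maskbits() bits wrap to zero);
 * in between, consecutive pages map to consecutive rtree_leaf_elm_t slots,
 * so plain elm++ suffices.
 */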

JEMALLOC_ALWAYS_INLINE void
rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents) {
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ false);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    rtree_contents_t contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ true);
	if (elm == NULL) {
		return true;
	}

	rtree_leaf_elm_write(tsdn, rtree, elm, contents);

	return false;
}
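
/*
 * A true return from rtree_write() means the lookup could not initialize a
 * missing interior node or leaf (i.e., allocation failure), so the mapping
 * was not recorded.  Sketch of a call site (names illustrative):
 *
 *   if (rtree_write(tsdn, rtree, rtree_ctx, (uintptr_t)addr, contents)) {
 *           ... unwind; the key is not visible in the rtree ...
 *   }
 */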

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	assert(rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).edata != NULL);
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
}

static inline void
rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end) {
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ true);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */