/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with chunks that are currently owned by jemalloc.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;

/*
 * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
 * machine address width.
 */
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
/*
 * Avoid math in RTREE_HEIGHT_MAX definition so that it can be used in cpp
 * conditionals. The following definitions are precomputed equivalents to:
 *
 * #define RTREE_HEIGHT_MAX \
 *     ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
 */
#if LG_RTREE_BITS_PER_LEVEL == 2
# if LG_SIZEOF_PTR == 3
#  define RTREE_HEIGHT_MAX 16
# elif LG_SIZEOF_PTR == 2
#  define RTREE_HEIGHT_MAX 8
# endif
#elif LG_RTREE_BITS_PER_LEVEL == 3
# if LG_SIZEOF_PTR == 3
#  define RTREE_HEIGHT_MAX 8
# elif LG_SIZEOF_PTR == 2
#  define RTREE_HEIGHT_MAX 4
# endif
#elif LG_RTREE_BITS_PER_LEVEL == 4
# if LG_SIZEOF_PTR == 3
#  define RTREE_HEIGHT_MAX 4
# elif LG_SIZEOF_PTR == 2
#  define RTREE_HEIGHT_MAX 2
# endif
#else
# error Unsupported LG_RTREE_BITS_PER_LEVEL
#endif
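
/*
 * Worked example of the precomputed table above (added for clarity; it simply
 * restates the arithmetic in the comment): with the default
 * LG_RTREE_BITS_PER_LEVEL of 4, RTREE_BITS_PER_LEVEL is 16, so a 64-bit system
 * (LG_SIZEOF_PTR == 3) yields
 *
 *   (ZU(1) << (3+3)) / 16 == 64 / 16 == 4
 *
 * levels, matching the RTREE_HEIGHT_MAX of 4 selected above, and a 32-bit
 * system (LG_SIZEOF_PTR == 2) yields 32 / 16 == 2.
 */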

/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
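
/*
 * Clarifying note: a child/subtree pointer thus has three observable states:
 * NULL (never initialized), RTREE_NODE_INITIALIZING (some thread has claimed
 * the slot and is still allocating the node), and an ordinary pointer greater
 * than 0x1 once initialization completes.  rtree_node_valid() below treats
 * only the last state as valid.
 */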

/*
 * The node allocation callback function's argument is the number of contiguous
 * rtree_node_elm_t structures to allocate, and the resulting memory must be
 * zeroed.
 */
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
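
/*
 * Illustrative sketch (not part of jemalloc; the names are hypothetical): a
 * minimal callback pair satisfying these typedefs, assuming plain libc
 * allocation is acceptable.  calloc() provides the required zeroed memory;
 * jemalloc's own chunks_rtree instead wires these up to its internal base
 * allocator.
 *
 *   static rtree_node_elm_t *
 *   example_node_alloc(size_t nelms)
 *   {
 *
 *       return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
 *   }
 *
 *   static void
 *   example_node_dalloc(rtree_node_elm_t *node)
 *   {
 *
 *       free(node);
 *   }
 */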

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_node_elm_s {
	union {
		void *pun;
		rtree_node_elm_t *child;
		extent_node_t *val;
	};
};

struct rtree_level_s {
	/*
	 * A non-NULL subtree points to a subtree rooted along the hypothetical
	 * path to the leaf node corresponding to key 0. Depending on what keys
	 * have been used to store to the tree, an arbitrary combination of
	 * subtree pointers may remain NULL.
	 *
	 * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
	 * This results in a 3-level tree, and the leftmost leaf can be directly
	 * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
	 * 0x00000000) can be accessed via subtrees[1], and the remainder of the
	 * tree can be accessed via subtrees[0].
	 *
	 *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
	 *
	 *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ...]
	 *
	 *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
	 *
	 * This has practical implications on x64, which currently uses only the
	 * lower 47 bits of virtual address space in userland, thus leaving
	 * subtrees[0] unused and avoiding a level of tree traversal.
	 */
	union {
		void *subtree_pun;
		rtree_node_elm_t *subtree;
	};
	/* Number of key bits distinguished by this level. */
	unsigned bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned cumbits;
};

struct rtree_s {
	rtree_node_alloc_t *alloc;
	rtree_node_dalloc_t *dalloc;
	unsigned height;
	/*
	 * Precomputed table used to convert from the number of leading 0 key
	 * bits to which subtree level to start at.
	 */
	unsigned start_level[RTREE_HEIGHT_MAX];
	rtree_level_t levels[RTREE_HEIGHT_MAX];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
    unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
    rtree_node_elm_t *elm, unsigned level);
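
/*
 * Illustrative usage (a sketch, not jemalloc code; node_alloc, node_dalloc,
 * chunk, and node are hypothetical): on a 64-bit system with 2 MiB (lg 21)
 * chunks, track extent_node_t metadata keyed by chunk address.
 *
 *   rtree_t r;
 *   bool err;
 *
 *   err = rtree_new(&r, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - 21),
 *       node_alloc, node_dalloc);
 *   err = rtree_set(&r, (uintptr_t)chunk, node);
 *   node = rtree_get(&r, (uintptr_t)chunk, true);
 *
 * Both rtree_new() and rtree_set() return true on failure.  rtree_get() with
 * dependent == true may only be used for keys known to have been inserted;
 * passing false instead makes lookups of missing entries return NULL.
 */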

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);

bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
    bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
    bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
    const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
    bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
    bool dependent);

extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
	unsigned start_level;

	if (unlikely(key == 0))
		return (rtree->height - 1);

	start_level = rtree->start_level[lg_floor(key) >>
	    LG_RTREE_BITS_PER_LEVEL];
	assert(start_level < rtree->height);
	return (start_level);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{

	return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    rtree->levels[level].cumbits)) & ((ZU(1) <<
	    rtree->levels[level].bits) - 1));
}
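
/*
 * Worked example (for illustration; the concrete bits/cumbits values depend on
 * how rtree_new() configured the levels): on a 64-bit system, a level with
 * bits == 16 and cumbits == 32 yields
 *
 *   (key >> (64 - 32)) & ((ZU(1) << 16) - 1) == (key >> 32) & 0xffff
 *
 * i.e. the 16 key bits immediately below the 16 most significant bits.
 */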

JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{

	return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
	rtree_node_elm_t *child;

	/* Double-checked read (first read may be stale). */
	child = elm->child;
	if (!dependent && !rtree_node_valid(child))
		child = atomic_read_p(&elm->pun);
	assert(!dependent || child != NULL);
	return (child);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
    bool dependent)
{
	rtree_node_elm_t *child;

	child = rtree_child_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_node_valid(child)))
		child = rtree_child_read_hard(rtree, elm, level);
	assert(!dependent || child != NULL);
	return (child);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{

	if (dependent) {
		/*
		 * Reading a val on behalf of a pointer to a valid allocation is
		 * guaranteed to be a clean read even without synchronization,
		 * because the rtree update became visible in memory before the
		 * pointer came into existence.
		 */
		return (elm->val);
	} else {
		/*
		 * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
		 * dependent on a previous rtree write, which means a stale read
		 * could result if synchronization were omitted here.
		 */
		return (atomic_read_p(&elm->pun));
	}
}

JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{

	atomic_write_p(&elm->pun, val);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
	rtree_node_elm_t *subtree;

	/* Double-checked read (first read may be stale). */
	subtree = rtree->levels[level].subtree;
	if (!dependent && unlikely(!rtree_node_valid(subtree)))
		subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
	assert(!dependent || subtree != NULL);
	return (subtree);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
	rtree_node_elm_t *subtree;

	subtree = rtree_subtree_tryread(rtree, level, dependent);
	if (!dependent && unlikely(!rtree_node_valid(subtree)))
		subtree = rtree_subtree_read_hard(rtree, level);
	assert(!dependent || subtree != NULL);
	return (subtree);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
	uintptr_t subkey;
	unsigned start_level;
	rtree_node_elm_t *node;

	start_level = rtree_start_level(rtree, key);

	node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
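	/*
	 * Explanatory note: the switch below is over the biased start level so
	 * that every case label is a compile-time constant in
	 * [0, RTREE_HEIGHT_MAX), even though the actual tree height is only
	 * known at run time.  For example, if RTREE_HEIGHT_MAX is 4 and
	 * rtree->height is 3, the bias is 1 and start levels 0..2 dispatch to
	 * cases 1..3, with case 3 being the RTREE_GET_LEAF case; the macros
	 * subtract the bias again before calling rtree_subkey().
	 */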
	switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
	case level: \
		assert(level < (RTREE_HEIGHT_MAX-1)); \
		if (!dependent && unlikely(!rtree_node_valid(node))) \
			return (NULL); \
		subkey = rtree_subkey(rtree, key, level - \
		    RTREE_GET_BIAS); \
		node = rtree_child_tryread(&node[subkey], dependent); \
		/* Fall through. */
#define RTREE_GET_LEAF(level) \
	case level: \
		assert(level == (RTREE_HEIGHT_MAX-1)); \
		if (!dependent && unlikely(!rtree_node_valid(node))) \
			return (NULL); \
		subkey = rtree_subkey(rtree, key, level - \
		    RTREE_GET_BIAS); \
		/* \
		 * node is a leaf, so it contains values rather than \
		 * child pointers. \
		 */ \
		return (rtree_val_read(rtree, &node[subkey], \
		    dependent));
#if RTREE_HEIGHT_MAX > 1
	RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
	RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
	RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
	RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
	RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
	RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
	RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
	RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
	RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
	RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
	RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
	RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
	RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
	RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
	RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
# error Unsupported RTREE_HEIGHT_MAX
#endif
	RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
	default: not_reached();
	}
#undef RTREE_GET_BIAS
	not_reached();
}

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
	uintptr_t subkey;
	unsigned i, start_level;
	rtree_node_elm_t *node, *child;

	start_level = rtree_start_level(rtree, key);

	node = rtree_subtree_read(rtree, start_level, false);
	if (node == NULL)
		return (true);
	for (i = start_level; /**/; i++, node = child) {
		subkey = rtree_subkey(rtree, key, i);
		if (i == rtree->height - 1) {
			/*
			 * node is a leaf, so it contains values rather than
			 * child pointers.
			 */
			rtree_val_write(rtree, &node[subkey], val);
			return (false);
		}
		assert(i + 1 < rtree->height);
		child = rtree_child_read(rtree, &node[subkey], i, false);
		if (child == NULL)
			return (true);
	}
	not_reached();
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/