/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_node_s extent_node_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents.  Use accessor functions for en_* fields. */
struct extent_node_s {
	/* Arena from which this extent came, if any. */
	arena_t			*en_arena;

	/* Pointer to the extent that this tree node is responsible for. */
	void			*en_addr;

	/* Total region size. */
	size_t			en_size;

	/*
	 * The zeroed flag is used by chunk recycling code to track whether
	 * memory is zero-filled.
	 */
	bool			en_zeroed;

	/*
	 * True if physical memory is committed to the extent, whether
	 * explicitly or implicitly as on a system that overcommits and
	 * satisfies physical memory needs on demand via soft page faults.
	 */
	bool			en_committed;

	/*
	 * The achunk flag is used to validate that huge allocation lookups
	 * don't return arena chunks.
	 */
	bool			en_achunk;

	/* Profile counters, used for huge objects. */
	prof_tctx_t		*en_prof_tctx;

	/* Linkage for arena's runs_dirty and chunks_cache rings. */
	arena_runs_dirty_link_t	rd;
	qr(extent_node_t)	cc_link;
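
	/*
	 * A node is linked into a size/address-ordered tree or onto one of
	 * the arena's lists, never both at once, so the following linkages
	 * can share storage.
	 */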
	union {
		/* Linkage for the size/address-ordered tree. */
		rb_node(extent_node_t)	szad_link;

		/* Linkage for arena's achunks, huge, and node_cache lists. */
		ql_elm(extent_node_t)	ql_link;
	};

	/* Linkage for the address-ordered tree. */
	rb_node(extent_node_t)	ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
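
/*
 * rb_proto() expands to prototypes for the red-black tree operations
 * (insert, remove, search, etc.); the matching definitions are generated by
 * rb_gen() in extent.c.  The szad tree orders extents by (size, address) for
 * best-fit recycling; the ad tree orders by address so that adjacent extents
 * can be coalesced.
 */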

rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*extent_node_arena_get(const extent_node_t *node);
void	*extent_node_addr_get(const extent_node_t *node);
size_t	extent_node_size_get(const extent_node_t *node);
bool	extent_node_zeroed_get(const extent_node_t *node);
bool	extent_node_committed_get(const extent_node_t *node);
bool	extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
void	extent_node_addr_set(extent_node_t *node, void *addr);
void	extent_node_size_set(extent_node_t *node, size_t size);
void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void	extent_node_committed_set(extent_node_t *node, bool committed);
void	extent_node_achunk_set(extent_node_t *node, bool achunk);
void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
    size_t size, bool zeroed, bool committed);
void	extent_node_dirty_linkage_init(extent_node_t *node);
void	extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void	extent_node_dirty_remove(extent_node_t *node);
#endif
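
/*
 * The definitions below are compiled either when inline functions are
 * enabled or when this header is included by extent.c itself (which defines
 * JEMALLOC_EXTENT_C_); in the latter case they are emitted exactly once, and
 * other translation units see only the prototypes above.
 */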

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{

	return (node->en_arena);
}

JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{

	return (node->en_addr);
}

JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{

	return (node->en_size);
}

JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{

	return (node->en_zeroed);
}

JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{

	assert(!node->en_achunk);
	return (node->en_committed);
}

JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{

	return (node->en_achunk);
}

JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{

	return (node->en_prof_tctx);
}

JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{

	node->en_arena = arena;
}

JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{

	node->en_addr = addr;
}

JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{

	node->en_size = size;
}

JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{

	node->en_zeroed = zeroed;
}

JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{

	node->en_committed = committed;
}

JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{

	node->en_achunk = achunk;
}

JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{

	node->en_prof_tctx = tctx;
}

JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
    bool zeroed, bool committed)
{

	extent_node_arena_set(node, arena);
	extent_node_addr_set(node, addr);
	extent_node_size_set(node, size);
	extent_node_zeroed_set(node, zeroed);
	extent_node_committed_set(node, committed);
	extent_node_achunk_set(node, false);
	if (config_prof)
		extent_node_prof_tctx_set(node, NULL);
}
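
/*
 * Illustrative use of extent_node_init() (a sketch, not code from this
 * header; "chunk", "csize", "zeroed", and "committed" are hypothetical
 * caller state):
 *
 *	extent_node_t node;
 *	extent_node_init(&node, arena, chunk, csize, zeroed, committed);
 *	assert(extent_node_arena_get(&node) == arena);
 *	assert(!extent_node_achunk_get(&node));
 */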

JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{

	qr_new(&node->rd, rd_link);
	qr_new(node, cc_link);
}
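
/*
 * A dirty node participates in two rings at once: the arena's runs_dirty
 * ring via rd, and its chunks_cache ring via cc_link.  Insertion and removal
 * below therefore always update both linkages together.
 */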

JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{

	qr_meld(runs_dirty, &node->rd, rd_link);
	qr_meld(chunks_dirty, node, cc_link);
}

JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{

	qr_remove(&node->rd, rd_link);
	qr_remove(node, cc_link);
}

#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/