/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_s extent_t;

#define EXTENT_HOOKS_INITIALIZER	NULL

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
	/* Arena from which this extent came, if any. */
	arena_t		*e_arena;

	/* Pointer to the extent that this structure is responsible for. */
	void		*e_addr;

	/* Extent size. */
	size_t		e_size;

	/*
	 * Usable size, typically smaller than extent size due to large_pad or
	 * promotion of sampled small regions.
	 */
	size_t		e_usize;

	/* True if extent is active (in use). */
	bool		e_active;

	/*
	 * The zeroed flag is used by extent recycling code to track whether
	 * memory is zero-filled.
	 */
	bool		e_zeroed;

	/*
	 * True if physical memory is committed to the extent, whether
	 * explicitly or implicitly as on a system that overcommits and
	 * satisfies physical memory needs on demand via soft page faults.
	 */
	bool		e_committed;

	/*
	 * The slab flag indicates whether the extent is used for a slab of
	 * small regions. This helps differentiate small size classes, and it
	 * indicates whether interior pointers can be looked up via iealloc().
	 */
	bool		e_slab;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t	e_slab_data;

		/* Profile counters, used for large objects. */
		union {
			void		*e_prof_tctx_pun;
			prof_tctx_t	*e_prof_tctx;
		};
	};

	/*
	 * Linkage for arena's extents_dirty and arena_bin_t's slabs_full
	 * rings.
	 */
	qr(extent_t)	qr_link;

	union {
		/* Linkage for per size class address-ordered heaps. */
		phn(extent_t)	ph_link;

		/* Linkage for arena's large and extent_cache lists. */
		ql_elm(extent_t)	ql_link;
	};
};
typedef ph(extent_t) extent_heap_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern rtree_t			extents_rtree;
extern const extent_hooks_t	extent_hooks_default;

extent_t	*extent_alloc(tsdn_t *tsdn, arena_t *arena);
void	extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);

extent_hooks_t	*extent_hooks_get(arena_t *arena);
extent_hooks_t	*extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks);

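/*
 * Size quantization rounds an extent size down/up to the nearest size used
 * for extent bookkeeping.  The JEMALLOC_JET variant exposes the functions as
 * pointers, presumably so tests can interpose them.
 */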
#ifdef JEMALLOC_JET
typedef size_t (extent_size_quantize_t)(size_t);
extern extent_size_quantize_t *extent_size_quantize_floor;
extern extent_size_quantize_t *extent_size_quantize_ceil;
#else
size_t	extent_size_quantize_floor(size_t size);
size_t	extent_size_quantize_ceil(size_t size);
#endif

ph_proto(, extent_heap_, extent_heap_t, extent_t)

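/*
 * Descriptive note (inferred from naming): the *_cache and *_wrapper
 * functions below route extent management through the hooks referenced by
 * *r_extent_hooks (see extent_hooks_get()/extent_hooks_set() above), so
 * internal callers and user-installed hooks share one code path.
 */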
extent_t	*extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool slab);
extent_t	*extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab);
void	extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
void	extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool	extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool	extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool	extent_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
extent_t	*extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    size_t usize_a, size_t size_b, size_t usize_b);
bool	extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
void	extent_prefork(tsdn_t *tsdn);
void	extent_postfork_parent(tsdn_t *tsdn);
void	extent_postfork_child(tsdn_t *tsdn);

bool	extent_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
extent_t	*extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
arena_t	*extent_arena_get(const extent_t *extent);
void	*extent_base_get(const extent_t *extent);
void	*extent_addr_get(const extent_t *extent);
size_t	extent_size_get(const extent_t *extent);
size_t	extent_usize_get(const extent_t *extent);
void	*extent_before_get(const extent_t *extent);
void	*extent_last_get(const extent_t *extent);
void	*extent_past_get(const extent_t *extent);
bool	extent_active_get(const extent_t *extent);
bool	extent_retained_get(const extent_t *extent);
bool	extent_zeroed_get(const extent_t *extent);
bool	extent_committed_get(const extent_t *extent);
bool	extent_slab_get(const extent_t *extent);
arena_slab_data_t	*extent_slab_data_get(extent_t *extent);
const arena_slab_data_t	*extent_slab_data_get_const(const extent_t *extent);
prof_tctx_t	*extent_prof_tctx_get(const extent_t *extent);
void	extent_arena_set(extent_t *extent, arena_t *arena);
void	extent_addr_set(extent_t *extent, void *addr);
void	extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void	extent_size_set(extent_t *extent, size_t size);
void	extent_usize_set(extent_t *extent, size_t usize);
void	extent_active_set(extent_t *extent, bool active);
void	extent_zeroed_set(extent_t *extent, bool zeroed);
void	extent_committed_set(extent_t *extent, bool committed);
void	extent_slab_set(extent_t *extent, bool slab);
void	extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void	extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    size_t usize, bool active, bool zeroed, bool committed, bool slab);
void	extent_ring_insert(extent_t *sentinel, extent_t *extent);
void	extent_ring_remove(extent_t *extent);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
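/*
 * Look up the extent that maps ptr via the global extents_rtree.  When
 * dependent is true the caller guarantees that a mapping exists, so the
 * radix tree lookup need not handle failure.
 */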
JEMALLOC_INLINE extent_t *
extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
{
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	return (rtree_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    dependent));
}

JEMALLOC_INLINE arena_t *
extent_arena_get(const extent_t *extent)
{

	return (extent->e_arena);
}

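/*
 * extent_base_get() returns the page-aligned start of the extent, which can
 * differ from extent_addr_get() once extent_addr_randomize() has offset the
 * address within the first page.
 */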
JEMALLOC_INLINE void *
extent_base_get(const extent_t *extent)
{

	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
	    !extent->e_slab);
	return (PAGE_ADDR2BASE(extent->e_addr));
}

JEMALLOC_INLINE void *
extent_addr_get(const extent_t *extent)
{

	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
	    !extent->e_slab);
	return (extent->e_addr);
}

JEMALLOC_INLINE size_t
extent_size_get(const extent_t *extent)
{

	return (extent->e_size);
}

JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent)
{

	assert(!extent->e_slab);
	return (extent->e_usize);
}

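/*
 * Neighboring-address helpers: extent_before_get() points into the page just
 * before the extent, extent_last_get() at its last page, and
 * extent_past_get() at the first byte past its end.
 */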
JEMALLOC_INLINE void *
extent_before_get(const extent_t *extent)
{

	return ((void *)((uintptr_t)extent->e_addr - PAGE));
}

JEMALLOC_INLINE void *
extent_last_get(const extent_t *extent)
{

	return ((void *)((uintptr_t)extent->e_addr + extent_size_get(extent) -
	    PAGE));
}

JEMALLOC_INLINE void *
extent_past_get(const extent_t *extent)
{

	return ((void *)((uintptr_t)extent->e_addr + extent_size_get(extent)));
}

JEMALLOC_INLINE bool
extent_active_get(const extent_t *extent)
{

	return (extent->e_active);
}

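/*
 * An extent is "retained" when it is not linked into any ring, i.e. its
 * qr_link still points back at itself.
 */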
JEMALLOC_INLINE bool
extent_retained_get(const extent_t *extent)
{

	return (qr_next(extent, qr_link) == extent);
}

JEMALLOC_INLINE bool
extent_zeroed_get(const extent_t *extent)
{

	return (extent->e_zeroed);
}

JEMALLOC_INLINE bool
extent_committed_get(const extent_t *extent)
{

	return (extent->e_committed);
}

JEMALLOC_INLINE bool
extent_slab_get(const extent_t *extent)
{

	return (extent->e_slab);
}

JEMALLOC_INLINE arena_slab_data_t *
extent_slab_data_get(extent_t *extent)
{

	assert(extent->e_slab);
	return (&extent->e_slab_data);
}

JEMALLOC_INLINE const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent)
{

	assert(extent->e_slab);
	return (&extent->e_slab_data);
}

JEMALLOC_INLINE prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent)
{

	return ((prof_tctx_t *)atomic_read_p(
	    &((extent_t *)extent)->e_prof_tctx_pun));
}

JEMALLOC_INLINE void
extent_arena_set(extent_t *extent, arena_t *arena)
{

	extent->e_arena = arena;
}

JEMALLOC_INLINE void
extent_addr_set(extent_t *extent, void *addr)
{

	extent->e_addr = addr;
}

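/*
 * When the requested alignment is smaller than a page, offset e_addr within
 * its page by a PRNG-chosen amount that preserves that alignment (cache-index
 * randomization).  extent_base_get() still reports the page-aligned start.
 */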
JEMALLOC_INLINE void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
{

	assert(extent_base_get(extent) == extent_addr_get(extent));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		uint64_t r =
		    prng_lg_range(&extent_arena_get(extent)->offset_state,
		    lg_range, true);
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
		    extent->e_addr);
	}
}

JEMALLOC_INLINE void
extent_size_set(extent_t *extent, size_t size)
{

	extent->e_size = size;
}

JEMALLOC_INLINE void
extent_usize_set(extent_t *extent, size_t usize)
{

	extent->e_usize = usize;
}

JEMALLOC_INLINE void
extent_active_set(extent_t *extent, bool active)
{

	extent->e_active = active;
}

JEMALLOC_INLINE void
extent_zeroed_set(extent_t *extent, bool zeroed)
{

	extent->e_zeroed = zeroed;
}

JEMALLOC_INLINE void
extent_committed_set(extent_t *extent, bool committed)
{

	extent->e_committed = committed;
}

JEMALLOC_INLINE void
extent_slab_set(extent_t *extent, bool slab)
{

	extent->e_slab = slab;
}

JEMALLOC_INLINE void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
{

	atomic_write_p(&extent->e_prof_tctx_pun, tctx);
}

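/*
 * Initialize every field of an extent and start it out as a ring of one
 * (qr_new), ready to be inserted into a dirty or slabs_full ring later.
 */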
JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    size_t usize, bool active, bool zeroed, bool committed, bool slab)
{

	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	extent_arena_set(extent, arena);
	extent_addr_set(extent, addr);
	extent_size_set(extent, size);
	extent_usize_set(extent, usize);
	extent_active_set(extent, active);
	extent_zeroed_set(extent, zeroed);
	extent_committed_set(extent, committed);
	extent_slab_set(extent, slab);
	if (config_prof)
		extent_prof_tctx_set(extent, NULL);
	qr_new(extent, qr_link);
}

JEMALLOC_INLINE void
extent_ring_insert(extent_t *sentinel, extent_t *extent)
{

	qr_meld(sentinel, extent, qr_link);
}

JEMALLOC_INLINE void
extent_ring_remove(extent_t *extent)
{

	qr_remove(extent, qr_link);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"