/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
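
/*
 * Illustrative example (assumes a common configuration, not required by the
 * code): with 4 KiB pages (LG_PAGE == 12) and LG_TINY_MIN == 3,
 * LG_RUN_MAXREGS == 9, so a single run holds at most RUN_MAXREGS == 512
 * regions.
 */
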
/*
 * Minimum redzone size. Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define REDZONE_MINSIZE 16
/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
 * as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 3
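
/*
 * Worked example (illustrative only): with the default opt_lg_dirty_mult of 3
 * and nactive == 4096, purging keeps ndirty <= (4096 >> 3) == 512 dirty pages
 * in the arena.
 */
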
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct arena_run_s {
    /* Index of bin this run is associated with. */
    index_t binind;

    /* Number of free regions in run. */
    unsigned nfree;

    /* Per region allocated/deallocated bitmap. */
    bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
    /*
     * Run address (or size) and various flags are stored together. The bit
     * layout looks like (assuming 32-bit system):
     *
     *   ???????? ???????? ????nnnn nnnndula
     *
     * ? : Unallocated: Run address for first/last pages, unset for internal
     *                  pages.
     *     Small: Run page offset.
     *     Large: Run size for first page, unset for trailing pages.
     * n : binind for small size class, BININD_INVALID for large size class.
     * d : dirty?
     * u : unzeroed?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.
     *
     * p : run page offset
     * s : run size
     * n : binind for size class; large objects set these to BININD_INVALID
     * x : don't care
     * - : 0
     * + : 1
     * [DULA] : bit set
     * [dula] : bit unset
     *
     *   Unallocated (clean):
     *     ssssssss ssssssss ssss++++ ++++du-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
     *     ssssssss ssssssss ssss++++ ++++dU-a
     *
     *   Unallocated (dirty):
     *     ssssssss ssssssss ssss++++ ++++D--a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     ssssssss ssssssss ssss++++ ++++D--a
     *
     *   Small:
     *     pppppppp pppppppp ppppnnnn nnnnd--A
     *     pppppppp pppppppp ppppnnnn nnnn---A
     *     pppppppp pppppppp ppppnnnn nnnnd--A
     *
     *   Large:
     *     ssssssss ssssssss ssss++++ ++++D-LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ----++++ ++++D-LA
     *
     *   Large (sampled, size <= LARGE_MINCLASS):
     *     ssssssss ssssssss ssssnnnn nnnnD-LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ----++++ ++++D-LA
     *
     *   Large (not sampled, size == LARGE_MINCLASS):
     *     ssssssss ssssssss ssss++++ ++++D-LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ----++++ ++++D-LA
     */
    size_t bits;
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID ((size_t)0xffU)
/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
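
/*
 * Illustrative decoding sketch (not part of the interface): for a page that
 * heads a small run, the owning bin and the run page offset can be recovered
 * from the packed word via
 *
 *   binind = (bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
 *   runind = bits >> LG_PAGE;
 *
 * which is what arena_mapbits_binind_get() and
 * arena_mapbits_small_runind_get() below do.
 */
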
/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t. Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
    /*
     * Linkage for run trees. There are two disjoint uses:
     *
     * 1) arena_t's runs_avail tree.
     * 2) arena_run_t conceptually uses this linkage for in-use non-full
     *    runs, rather than directly embedding linkage.
     */
    rb_node(arena_chunk_map_misc_t) rb_link;

    union {
        /* Linkage for list of dirty runs. */
        ql_elm(arena_chunk_map_misc_t) dr_link;

        /* Profile counters, used for large object runs. */
        prof_tctx_t *prof_tctx;

        /* Small region run metadata. */
        arena_run_t run;
    };
};
typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
    /*
     * The arena that owns the chunk is node.arena. This field as a whole
     * is used by chunks_rtree to support both ivsalloc() and core-based
     * debugging.
     */
    extent_node_t node;

    /*
     * Map of pages within chunk that keeps track of free/large/small. The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map. This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
    arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};
/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on. Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
    /* Size of regions in a run for this bin's size class. */
    size_t reg_size;

    /* Redzone size. */
    size_t redzone_size;

    /* Interval between regions (reg_size + (redzone_size << 1)). */
    size_t reg_interval;

    /* Total size of a run for this bin's size class. */
    size_t run_size;

    /* Total number of regions in a run for this bin's size class. */
    uint32_t nregs;

    /*
     * Metadata used to manipulate bitmaps for runs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;

    /* Offset of first region in a run for this bin's size class. */
    uint32_t reg0_offset;
};
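
/*
 * Illustrative addressing sketch (not part of the interface): given the
 * layout above, reg_interval == reg_size + (redzone_size << 1), and region i
 * of a run whose pages start at rpages lives at
 *
 *   (uintptr_t)rpages + reg0_offset + i * reg_interval
 *
 * arena_run_regind() below inverts this mapping.
 */
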
struct arena_bin_s {
    /*
     * All operations on runcur, runs, and stats require that lock be
     * locked. Run allocation/deallocation are protected by the arena lock,
     * which may be acquired while holding one or more bin locks, but not
     * vice versa.
     */
    malloc_mutex_t lock;

    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t *runcur;

    /*
     * Tree of non-full runs. This tree is used when looking for an
     * existing run when runcur is no longer usable. We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_tree_t runs;

    /* Bin statistics. */
    malloc_bin_stats_t stats;
};
struct arena_s {
    /* This arena's index within the arenas array. */
    unsigned ind;

    /*
     * Number of threads currently assigned to this arena. This field is
     * protected by arenas_lock.
     */
    unsigned nthreads;

    /*
     * There are three classes of arena operations from a locking
     * perspective:
     * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
     * 2) Bin-related operations are protected by bin locks.
     * 3) Chunk- and run-related operations are protected by this mutex.
     */
    malloc_mutex_t lock;

    arena_stats_t stats;
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit if
     * opt_stats_print is enabled.
     */
    ql_head(tcache_t) tcache_ql;

    uint64_t prof_accumbytes;

    dss_prec_t dss_prec;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk. The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t *spare;

    /* Number of pages in active runs and huge regions. */
    size_t nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t ndirty;

    /*
     * Size/address-ordered trees of this arena's available runs. The trees
     * are used for first-best-fit run allocation.
     */
    arena_avail_tree_t runs_avail;

    /* List of dirty runs this arena manages. */
    arena_chunk_miscelms_t runs_dirty;

    /* Extant huge allocations. */
    ql_head(extent_node_t) huge;
    /* Synchronizes all huge allocation/update/deallocation. */
    malloc_mutex_t huge_mtx;

    /*
     * Trees of chunks that were previously allocated (trees differ only in
     * node ordering). These are used when allocating chunks, in an attempt
     * to re-use address space. Depending on function, different tree
     * orderings are needed, which is why there are two trees with the same
     * contents.
     */
    extent_tree_t chunks_szad_mmap;
    extent_tree_t chunks_ad_mmap;
    extent_tree_t chunks_szad_dss;
    extent_tree_t chunks_ad_dss;
    malloc_mutex_t chunks_mtx;
    /* Cache of nodes that were allocated via base_alloc(). */
    ql_head(extent_node_t) node_cache;
    malloc_mutex_t node_cache_mtx;

    /*
     * User-configurable chunk allocation and deallocation functions.
     */
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;

    /* bins is used to store trees of free regions. */
    arena_bin_t bins[NBINS];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t opt_lg_dirty_mult;

extern arena_bin_info_t arena_bin_info[NBINS];

extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t arena_maxclass; /* Max size class for arenas. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */

extent_node_t *arena_node_alloc(arena_t *arena);
void arena_node_dalloc(arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    index_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
arena_t *arena_new(unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
    size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
    size_t pageind);
size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    index_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, index_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
        (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
        map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
        *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

    assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
    assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

    return (miscelm);
}
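
/*
 * Illustrative invariants (not part of the interface): because the run is
 * embedded in its map element, arena_run_to_miscelm(&miscelm->run) == miscelm,
 * and the run's backing pages are found purely by address arithmetic via
 * arena_miscelm_to_rpages(); no back-pointer is stored in arena_run_t.
 */
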
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

    return (&arena_bitselm_get(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{

    return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

    return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
    return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        CHUNK_MAP_ALLOCATED);
    return (mapbits >> LG_PAGE);
}

JEMALLOC_ALWAYS_INLINE index_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;
    index_t binind;

    mapbits = arena_mapbits_get(chunk, pageind);
    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    assert(binind < NBINS || binind == BININD_INVALID);
    return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

    *mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

    assert((size & PAGE_MASK) == 0);
    assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
    assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
    arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert((size & PAGE_MASK) == 0);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);
    size_t unzeroed;

    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_DIRTY) == flags);
    unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
    arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
        | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    index_t binind)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert(binind <= BININD_INVALID);
    assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS);
    arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
        (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    index_t binind, size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);
    size_t unzeroed;

    assert(binind < BININD_INVALID);
    assert(pageind - runind >= map_bias);
    assert((flags & CHUNK_MAP_DIRTY) == flags);
    unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
    arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
        CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
        unzeroed);
}
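
/*
 * Illustrative usage sketch (not part of the interface): marking page
 * `pageind' as the head page of a small run for bin `binind' amounts to
 *
 *   arena_mapbits_small_set(chunk, pageind, 0, binind, flags);
 *
 * which packs the run page offset (0 for the head page), the binind, the
 * dirty flag, the preserved unzeroed bit, and CHUNK_MAP_ALLOCATED into one
 * map word.
 */
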
JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{

    atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{

    atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{

    return (atomic_read_z(&arena->stats.metadata_allocated));
}
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);
    assert(prof_interval != 0);

    arena->prof_accumbytes += accumbytes;
    if (arena->prof_accumbytes >= prof_interval) {
        arena->prof_accumbytes -= prof_interval;
        return (true);
    }
    return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);
    return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);

    {
        bool ret;

        malloc_mutex_lock(&arena->lock);
        ret = arena_prof_accum_impl(arena, accumbytes);
        malloc_mutex_unlock(&arena->lock);
        return (ret);
    }
}
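
/*
 * Illustrative usage sketch (not part of the interface): allocation paths
 * call arena_prof_accum(arena, usize) after each allocation; once the running
 * total crosses prof_interval the function returns true and the caller
 * triggers an interval-based profile dump.
 */
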
JEMALLOC_ALWAYS_INLINE index_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
    index_t binind;

    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

    if (config_debug) {
        arena_chunk_t *chunk;
        arena_t *arena;
        size_t pageind;
        size_t actual_mapbits;
        size_t rpages_ind;
        arena_run_t *run;
        arena_bin_t *bin;
        index_t run_binind, actual_binind;
        arena_bin_info_t *bin_info;
        arena_chunk_map_misc_t *miscelm;
        void *rpages;

        assert(binind != BININD_INVALID);
        assert(binind < NBINS);
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->node.arena;
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        actual_mapbits = arena_mapbits_get(chunk, pageind);
        assert(mapbits == actual_mapbits);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
            pageind);
        miscelm = arena_miscelm_get(chunk, rpages_ind);
        run = &miscelm->run;
        run_binind = run->binind;
        bin = &arena->bins[run_binind];
        actual_binind = bin - arena->bins;
        assert(run_binind == actual_binind);
        bin_info = &arena_bin_info[actual_binind];
        rpages = arena_miscelm_to_rpages(miscelm);
        assert(((uintptr_t)ptr - ((uintptr_t)rpages +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
            == 0);
    }

    return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
|
|
|
|
JEMALLOC_INLINE index_t
|
2011-03-16 04:59:15 +08:00
|
|
|
arena_bin_index(arena_t *arena, arena_bin_t *bin)
|
|
|
|
{
|
2014-10-06 08:54:10 +08:00
|
|
|
index_t binind = bin - arena->bins;
|
2012-02-29 08:50:47 +08:00
|
|
|
assert(binind < NBINS);
|
2011-03-16 04:59:15 +08:00
|
|
|
return (binind);
|
|
|
|
}
|
|
|
|
|
2010-10-21 11:52:00 +08:00
|
|
|
JEMALLOC_INLINE unsigned
|
2011-03-16 13:19:45 +08:00
|
|
|
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
|
2010-10-21 11:52:00 +08:00
|
|
|
{
|
|
|
|
unsigned shift, diff, regind;
|
2012-04-06 15:35:09 +08:00
|
|
|
size_t interval;
|
2014-09-29 16:31:39 +08:00
|
|
|
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
|
|
|
|
void *rpages = arena_miscelm_to_rpages(miscelm);
|
2010-10-21 11:52:00 +08:00
|
|
|
|
Use bitmaps to track small regions.
The previous free list implementation, which embedded singly linked
lists in available regions, had the unfortunate side effect of causing
many cache misses during thread cache fills. Fix this in two places:
- arena_run_t: Use a new bitmap implementation to track which regions
are available. Furthermore, revert to preferring the
lowest available region (as jemalloc did with its old
bitmap-based approach).
- tcache_t: Move read-only tcache_bin_t metadata into
tcache_bin_info_t, and add a contiguous array of pointers
to tcache_t in order to track cached objects. This
substantially increases the size of tcache_t, but results
in much higher data locality for common tcache operations.
As a side benefit, it is again possible to efficiently
flush the least recently used cached objects, so this
change changes flushing from MRU to LRU.
The new bitmap implementation uses a multi-level summary approach to
make finding the lowest available region very fast. In practice,
bitmaps only have one or two levels, though the implementation is
general enough to handle extremely large bitmaps, mainly so that large
page sizes can still be entertained.
Fix tcache_bin_flush_large() to always flush statistics, in the same way
that tcache_bin_flush_small() was recently fixed.
Use JEMALLOC_DEBUG rather than NDEBUG.
Add dassert(), and use it for debug-only asserts.
2011-03-17 01:30:13 +08:00
|
|
|
/*
|
|
|
|
* Freeing a pointer lower than region zero can cause assertion
|
|
|
|
* failure.
|
|
|
|
*/
|
2014-09-29 16:31:39 +08:00
|
|
|
assert((uintptr_t)ptr >= (uintptr_t)rpages +
|
2011-03-17 01:30:13 +08:00
|
|
|
(uintptr_t)bin_info->reg0_offset);
|
2010-10-21 11:52:00 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid doing division with a variable divisor if possible. Using
|
|
|
|
* actual division here can reduce allocator throughput by over 20%!
|
|
|
|
*/
|
2014-09-29 16:31:39 +08:00
|
|
|
diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
|
2011-03-16 04:59:15 +08:00
|
|
|
bin_info->reg0_offset);
|
2010-10-21 11:52:00 +08:00
|
|
|
|
|
|
|
/* Rescale (factor powers of 2 out of the numerator and denominator). */
|
2012-04-06 15:35:09 +08:00
|
|
|
interval = bin_info->reg_interval;
|
2014-05-29 10:37:02 +08:00
|
|
|
shift = jemalloc_ffs(interval) - 1;
|
2010-10-21 11:52:00 +08:00
|
|
|
diff >>= shift;
|
2012-04-06 15:35:09 +08:00
|
|
|
interval >>= shift;
|
2010-10-21 11:52:00 +08:00
|
|
|
|
2012-04-06 15:35:09 +08:00
|
|
|
if (interval == 1) {
|
2010-10-21 11:52:00 +08:00
|
|
|
/* The divisor was a power of 2. */
|
|
|
|
regind = diff;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* To divide by a number D that is not a power of two we
|
|
|
|
* multiply by (2^SIZE_INV_SHIFT / D) and then right shift by
* SIZE_INV_SHIFT positions.
|
|
|
|
*
|
|
|
|
* X / D
|
|
|
|
*
|
|
|
|
* becomes
|
|
|
|
*
|
2012-04-06 15:35:09 +08:00
|
|
|
* (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
|
2010-10-21 11:52:00 +08:00
|
|
|
*
|
|
|
|
* We can omit the first three elements, because we never
|
|
|
|
* divide by 0, and 1 and 2 are both powers of two, which are
|
|
|
|
* handled above.
|
|
|
|
*/
|
2011-03-23 00:00:56 +08:00
|
|
|
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
|
|
|
|
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
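/*
 * Worked example (illustrative; assumes SIZE_INV_SHIFT == 21, i.e.
 * LG_RUN_MAXREGS == 11): for interval == 3, SIZE_INV(3) ==
 * (2^21 / 3) + 1 == 699051.  With diff == 96,
 * (96 * 699051) >> 21 == 67108896 >> 21 == 32 == 96 / 3.  The "+ 1"
 * rounds the reciprocal up; because diff is always an exact multiple of
 * interval and the quotient is bounded by RUN_MAXREGS, the truncating
 * shift recovers the quotient exactly.
 */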
|
2012-04-06 15:35:09 +08:00
|
|
|
static const unsigned interval_invs[] = {
|
2010-10-21 11:52:00 +08:00
|
|
|
SIZE_INV(3),
|
|
|
|
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
|
|
|
|
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
|
|
|
|
SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
|
|
|
|
SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
|
|
|
|
SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
|
|
|
|
SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
|
|
|
|
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
|
|
|
|
};
|
|
|
|
|
2014-09-12 07:20:44 +08:00
|
|
|
if (likely(interval <= ((sizeof(interval_invs) /
|
|
|
|
sizeof(unsigned)) + 2))) {
|
2012-04-06 15:35:09 +08:00
|
|
|
regind = (diff * interval_invs[interval - 3]) >>
|
|
|
|
SIZE_INV_SHIFT;
|
|
|
|
} else
|
|
|
|
regind = diff / interval;
|
2010-10-21 11:52:00 +08:00
|
|
|
#undef SIZE_INV
|
|
|
|
#undef SIZE_INV_SHIFT
|
|
|
|
}
|
2012-04-06 15:35:09 +08:00
|
|
|
assert(diff == regind * interval);
|
2011-03-16 04:59:15 +08:00
|
|
|
assert(regind < bin_info->nregs);
|
2010-10-21 11:52:00 +08:00
|
|
|
|
|
|
|
return (regind);
|
|
|
|
}
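/*
 * Illustrative inverse of arena_run_regind() (a sketch, not part of the
 * header proper): given a region index, the region address within a run is
 *
 *   rpages + bin_info->reg0_offset + regind * bin_info->reg_interval
 *
 * which is consistent with the assertion above that diff == regind *
 * interval.  The helper name below is hypothetical.
 */
#if 0
JEMALLOC_INLINE void *
arena_run_reg_at(void *rpages, arena_bin_info_t *bin_info, unsigned regind)
{

	return ((void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind)));
}
#endif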
|
|
|
|
|
2014-08-19 07:22:13 +08:00
|
|
|
JEMALLOC_INLINE prof_tctx_t *
|
|
|
|
arena_prof_tctx_get(const void *ptr)
|
2010-10-21 11:52:00 +08:00
|
|
|
{
|
2014-08-19 07:22:13 +08:00
|
|
|
prof_tctx_t *ret;
|
2010-10-21 11:52:00 +08:00
|
|
|
arena_chunk_t *chunk;
|
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
cassert(config_prof);
|
2010-10-21 11:52:00 +08:00
|
|
|
assert(ptr != NULL);
|
|
|
|
assert(CHUNK_ADDR2BASE(ptr) != ptr);
|
|
|
|
|
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(chunk != ptr)) {
|
|
|
|
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
|
|
|
|
size_t mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
|
|
|
|
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
|
|
|
|
ret = (prof_tctx_t *)(uintptr_t)1U;
|
2015-02-13 07:54:53 +08:00
|
|
|
else {
|
|
|
|
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
|
|
|
|
pageind);
|
|
|
|
ret = atomic_read_p((void **)&elm->prof_tctx);
|
|
|
|
}
|
2015-02-13 06:06:37 +08:00
|
|
|
} else
|
|
|
|
ret = huge_prof_tctx_get(ptr);
|
2010-10-21 11:52:00 +08:00
|
|
|
|
|
|
|
return (ret);
|
|
|
|
}
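/*
 * Interpretive note: a small-mapped allocation is never a sampled object
 * (sampled small requests are promoted to large runs), so the sentinel
 * (prof_tctx_t *)(uintptr_t)1U is returned for it rather than a real
 * prof_tctx pointer; only large and huge allocations store an actual
 * prof_tctx.
 */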
|
2010-10-23 01:45:59 +08:00
|
|
|
|
|
|
|
JEMALLOC_INLINE void
|
2014-08-19 07:22:13 +08:00
|
|
|
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
|
2010-10-23 01:45:59 +08:00
|
|
|
{
|
|
|
|
arena_chunk_t *chunk;
|
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
cassert(config_prof);
|
2010-10-23 01:45:59 +08:00
|
|
|
assert(ptr != NULL);
|
|
|
|
assert(CHUNK_ADDR2BASE(ptr) != ptr);
|
|
|
|
|
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(chunk != ptr)) {
|
|
|
|
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
|
|
|
|
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
|
2013-12-16 08:21:30 +08:00
|
|
|
|
2015-02-13 07:54:53 +08:00
|
|
|
if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
|
|
|
|
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
|
|
|
|
pageind);
|
|
|
|
atomic_write_p((void **)&elm->prof_tctx, tctx);
|
|
|
|
}
|
2015-02-13 06:06:37 +08:00
|
|
|
} else
|
|
|
|
huge_prof_tctx_set(ptr, tctx);
|
2010-10-23 01:45:59 +08:00
|
|
|
}
|
2010-10-21 11:52:00 +08:00
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void *
|
2014-09-23 12:09:23 +08:00
|
|
|
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
|
2015-01-30 07:30:47 +08:00
|
|
|
tcache_t *tcache)
|
2012-02-14 04:29:49 +08:00
|
|
|
{
|
|
|
|
|
|
|
|
assert(size != 0);
|
2012-03-22 09:33:03 +08:00
|
|
|
assert(size <= arena_maxclass);
|
2012-02-14 04:29:49 +08:00
|
|
|
|
2015-02-14 07:28:56 +08:00
|
|
|
arena = arena_choose(tsd, arena);
|
|
|
|
if (unlikely(arena == NULL))
|
|
|
|
return (NULL);
|
|
|
|
|
2014-09-12 07:20:44 +08:00
|
|
|
if (likely(size <= SMALL_MAXCLASS)) {
|
2015-02-14 07:28:56 +08:00
|
|
|
if (likely(tcache != NULL)) {
|
|
|
|
return (tcache_alloc_small(tsd, arena, tcache, size,
|
|
|
|
zero));
|
|
|
|
} else
|
2014-10-08 14:14:57 +08:00
|
|
|
return (arena_malloc_small(arena, size, zero));
|
2015-02-13 06:06:37 +08:00
|
|
|
} else if (likely(size <= arena_maxclass)) {
|
2012-02-14 07:18:19 +08:00
|
|
|
/*
|
|
|
|
* Initialize tcache after checking size in order to avoid
|
|
|
|
* infinite recursion during tcache initialization.
|
|
|
|
*/
|
2015-02-14 07:28:56 +08:00
|
|
|
if (likely(tcache != NULL) && size <= tcache_maxclass) {
|
|
|
|
return (tcache_alloc_large(tsd, arena, tcache, size,
|
|
|
|
zero));
|
|
|
|
} else
|
2014-10-08 14:14:57 +08:00
|
|
|
return (arena_malloc_large(arena, size, zero));
|
2015-02-13 06:06:37 +08:00
|
|
|
} else
|
|
|
|
return (huge_malloc(tsd, arena, size, zero, tcache));
|
2012-02-14 04:29:49 +08:00
|
|
|
}
|
|
|
|
|
2014-11-28 03:22:36 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE arena_t *
|
|
|
|
arena_aalloc(const void *ptr)
|
|
|
|
{
|
|
|
|
arena_chunk_t *chunk;
|
|
|
|
|
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(chunk != ptr))
|
|
|
|
return (chunk->node.arena);
|
|
|
|
else
|
|
|
|
return (huge_aalloc(ptr));
|
2014-11-28 03:22:36 +08:00
|
|
|
}
|
|
|
|
|
2012-04-20 09:28:03 +08:00
|
|
|
/* Return the size of the allocation pointed to by ptr. */
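/*
 * Sketch of the demote semantics, as inferred from the checks below: with
 * profiling enabled, a sampled small request is backed by a large run but
 * keeps its small binind in the chunk map.  Passing demote == true reports
 * the small (demoted) size class; passing demote == false reports the
 * promoted large size instead.
 */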
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2012-04-20 09:28:03 +08:00
|
|
|
arena_salloc(const void *ptr, bool demote)
|
|
|
|
{
|
|
|
|
size_t ret;
|
|
|
|
arena_chunk_t *chunk;
|
2014-10-06 08:54:10 +08:00
|
|
|
size_t pageind;
|
|
|
|
index_t binind;
|
2012-04-20 09:28:03 +08:00
|
|
|
|
|
|
|
assert(ptr != NULL);
|
|
|
|
assert(CHUNK_ADDR2BASE(ptr) != ptr);
|
|
|
|
|
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(chunk != ptr)) {
|
|
|
|
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
|
|
|
|
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
|
|
|
|
binind = arena_mapbits_binind_get(chunk, pageind);
|
|
|
|
if (unlikely(binind == BININD_INVALID || (config_prof && !demote
|
|
|
|
&& arena_mapbits_large_get(chunk, pageind) != 0))) {
|
|
|
|
/*
|
|
|
|
* Large allocation. In the common case (demote), and
|
|
|
|
* as this is an inline function, most callers will only
|
|
|
|
* end up looking at binind to determine that ptr is a
|
|
|
|
* small allocation.
|
|
|
|
*/
|
|
|
|
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
|
|
|
|
ret = arena_mapbits_large_size_get(chunk, pageind);
|
|
|
|
assert(ret != 0);
|
|
|
|
assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
|
|
|
|
assert(arena_mapbits_dirty_get(chunk, pageind) ==
|
|
|
|
arena_mapbits_dirty_get(chunk,
|
|
|
|
pageind+(ret>>LG_PAGE)-1));
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Small allocation (possibly promoted to a large
|
|
|
|
* object).
|
|
|
|
*/
|
|
|
|
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
|
|
|
|
arena_ptr_small_binind_get(ptr,
|
|
|
|
arena_mapbits_get(chunk, pageind)) == binind);
|
|
|
|
ret = index2size(binind);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
ret = huge_salloc(ptr);
|
2012-04-20 09:28:03 +08:00
|
|
|
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2015-02-12 04:24:27 +08:00
|
|
|
arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
|
2010-01-17 01:53:50 +08:00
|
|
|
{
|
2015-02-12 04:24:27 +08:00
|
|
|
arena_chunk_t *chunk;
|
2012-05-02 15:30:36 +08:00
|
|
|
size_t pageind, mapbits;
|
2010-01-17 01:53:50 +08:00
|
|
|
|
|
|
|
assert(ptr != NULL);
|
2015-02-12 04:24:27 +08:00
|
|
|
|
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(chunk != ptr)) {
|
|
|
|
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
|
|
|
|
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
|
|
|
|
/* Small allocation. */
|
|
|
|
if (likely(tcache != NULL)) {
|
|
|
|
index_t binind = arena_ptr_small_binind_get(ptr,
|
|
|
|
mapbits);
|
|
|
|
tcache_dalloc_small(tsd, tcache, ptr, binind);
|
|
|
|
} else {
|
|
|
|
arena_dalloc_small(chunk->node.arena, chunk,
|
|
|
|
ptr, pageind);
|
|
|
|
}
|
2015-02-12 04:24:27 +08:00
|
|
|
} else {
|
2015-02-13 06:06:37 +08:00
|
|
|
size_t size = arena_mapbits_large_size_get(chunk,
|
2015-02-12 04:24:27 +08:00
|
|
|
pageind);
|
2012-02-14 04:29:49 +08:00
|
|
|
|
2015-02-13 06:06:37 +08:00
|
|
|
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
|
2012-02-14 04:29:49 +08:00
|
|
|
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(tcache != NULL) && size <= tcache_maxclass)
|
|
|
|
tcache_dalloc_large(tsd, tcache, ptr, size);
|
|
|
|
else {
|
|
|
|
arena_dalloc_large(chunk->node.arena, chunk,
|
|
|
|
ptr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
huge_dalloc(tsd, ptr, tcache);
|
2010-01-17 01:53:50 +08:00
|
|
|
}
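/*
 * arena_sdalloc() below is the sized counterpart of arena_dalloc(): the
 * caller supplies the usable size, so in the common small-object path the
 * size class is computed directly via size2index() instead of being read
 * back out of the chunk map.
 */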
|
2014-08-29 03:41:48 +08:00
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
2015-02-13 06:06:37 +08:00
|
|
|
arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
|
2014-08-29 03:41:48 +08:00
|
|
|
{
|
2015-02-13 06:06:37 +08:00
|
|
|
arena_chunk_t *chunk;
|
2014-08-29 03:41:48 +08:00
|
|
|
|
2015-02-13 06:06:37 +08:00
|
|
|
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
|
|
|
if (likely(chunk != ptr)) {
|
|
|
|
if (config_prof && opt_prof) {
|
2014-09-12 07:20:44 +08:00
|
|
|
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
|
|
|
|
LG_PAGE;
|
2015-02-13 06:06:37 +08:00
|
|
|
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
|
|
|
|
if (arena_mapbits_large_get(chunk, pageind) != 0) {
|
|
|
|
/*
|
|
|
|
* Make sure to use promoted size, not request
|
|
|
|
* size.
|
|
|
|
*/
|
|
|
|
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
|
|
|
|
size = arena_mapbits_large_size_get(chunk,
|
|
|
|
pageind);
|
|
|
|
}
|
2014-08-29 03:41:48 +08:00
|
|
|
}
|
2015-02-13 06:06:37 +08:00
|
|
|
assert(s2u(size) == s2u(arena_salloc(ptr, false)));
|
|
|
|
|
|
|
|
if (likely(size <= SMALL_MAXCLASS)) {
|
|
|
|
/* Small allocation. */
|
|
|
|
if (likely(tcache != NULL)) {
|
|
|
|
index_t binind = size2index(size);
|
|
|
|
tcache_dalloc_small(tsd, tcache, ptr, binind);
|
|
|
|
} else {
|
|
|
|
size_t pageind = ((uintptr_t)ptr -
|
|
|
|
(uintptr_t)chunk) >> LG_PAGE;
|
|
|
|
arena_dalloc_small(chunk->node.arena, chunk,
|
|
|
|
ptr, pageind);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
|
2014-08-29 03:41:48 +08:00
|
|
|
|
2015-02-13 06:06:37 +08:00
|
|
|
if (likely(tcache != NULL) && size <= tcache_maxclass)
|
|
|
|
tcache_dalloc_large(tsd, tcache, ptr, size);
|
|
|
|
else {
|
|
|
|
arena_dalloc_large(chunk->node.arena, chunk,
|
|
|
|
ptr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
huge_dalloc(tsd, ptr, tcache);
|
2014-08-29 03:41:48 +08:00
|
|
|
}
|
2014-10-06 08:54:10 +08:00
|
|
|
# endif /* JEMALLOC_ARENA_INLINE_B */
|
2010-01-17 01:53:50 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* JEMALLOC_H_INLINES */
|
|
|
|
/******************************************************************************/
|