/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> lg_dirty_mult) >= ndirty
 *
 * So, supposing that lg_dirty_mult is 3, there can be no fewer than 8 times as
 * many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 3
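
/*
 * Worked example (illustrative only): with the default lg_dirty_mult of 3, an
 * arena holding 4096 active pages may accumulate at most 4096 >> 3 == 512
 * dirty pages before purging is warranted, i.e. purging triggers once
 *
 *   ndirty > (nactive >> lg_dirty_mult)
 */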

typedef enum {
	purge_mode_ratio = 0,
	purge_mode_decay = 1,

	purge_mode_limit = 2
} purge_mode_t;
#define PURGE_DEFAULT purge_mode_ratio
/* Default decay time in seconds. */
#define DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
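
/*
 * Example (illustrative; assumes the usual mapping of opt_* variables to
 * MALLOC_CONF option names): selecting decay-based purging with a 10 second
 * decay time at run time would look like
 *
 *   MALLOC_CONF="purge:decay,decay_time:10" ./app
 *
 * which corresponds to opt_purge == purge_mode_decay and opt_decay_time == 10.
 */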

typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
	/* Index of bin this run is associated with. */
	szind_t binind;

	/* Number of free regions in run. */
	unsigned nfree;

	/* Per region allocated/deallocated bitmap. */
	bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
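
/*
 * Illustrative sketch (not the actual implementation): handing out a region
 * from a non-full run amounts to claiming the lowest clear bit in the run's
 * bitmap and decrementing the free count, roughly:
 *
 *   size_t regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
 *   run->nfree--;
 *
 * where bitmap_sfu() ("set first unset") is assumed to behave as in the
 * bitmap module, and bin_info is the arena_bin_info_t for run->binind.
 */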

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
	/*
	 * Run address (or size) and various flags are stored together.  The bit
	 * layout looks like (assuming a 32-bit system):
	 *
	 *   ???????? ???????? ???nnnnn nnndumla
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for internal
	 *                  pages.
	 *     Small: Run page offset.
	 *     Large: Run page count for first page, unset for trailing pages.
	 * n : binind for small size class, BININD_INVALID for large size class.
	 * d : dirty?
	 * u : unzeroed?
	 * m : decommitted?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * n : binind for size class; large objects set these to BININD_INVALID
	 * x : don't care
	 * - : 0
	 * + : 1
	 * [DUMLA] : bit set
	 * [dumla] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss sss+++++ +++dum-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
	 *     ssssssss ssssssss sss+++++ +++dUm-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss sss+++++ +++D-m-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     ssssssss ssssssss sss+++++ +++D-m-a
	 *
	 *   Small:
	 *     pppppppp pppppppp pppnnnnn nnnd---A
	 *     pppppppp pppppppp pppnnnnn nnn----A
	 *     pppppppp pppppppp pppnnnnn nnnd---A
	 *
	 *   Large:
	 *     ssssssss ssssssss sss+++++ +++D--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 *
	 *   Large (sampled, size <= LARGE_MINCLASS):
	 *     ssssssss ssssssss sssnnnnn nnnD--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 *
	 *   Large (not sampled, size == LARGE_MINCLASS):
	 *     ssssssss ssssssss sss+++++ +++D--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 */
	size_t bits;
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)

#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
#define CHUNK_MAP_DIRTY ((size_t)0x10U)
#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)

#define CHUNK_MAP_BININD_SHIFT 5
#define BININD_INVALID ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK

#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define CHUNK_MAP_SIZE_MASK \
    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
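
/*
 * Worked example (illustrative only, ignoring the unzeroed flag): the first
 * page of a small run at run page offset 2, bin index 3, with the dirty flag
 * set, would carry
 *
 *   bits == (2 << CHUNK_MAP_RUNIND_SHIFT) | (3 << CHUNK_MAP_BININD_SHIFT) |
 *       CHUNK_MAP_DIRTY | CHUNK_MAP_ALLOCATED
 *
 * and the inline accessors below recover the pieces, e.g.
 * (bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT == 3.
 */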

struct arena_runs_dirty_link_s {
	qr(arena_runs_dirty_link_t) rd_link;
};

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
	/*
	 * Linkage for run heaps.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail heaps.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	phn(arena_chunk_map_misc_t) ph_link;

	union {
		/* Linkage for list of dirty runs. */
		arena_runs_dirty_link_t rd;

		/* Profile counters, used for large object runs. */
		union {
			void *prof_tctx_pun;
			prof_tctx_t *prof_tctx;
		};

		/* Small region run metadata. */
		arena_run_t run;
	};
};
typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t reg_size;

	/* Total size of a run for this bin's size class. */
	size_t run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t nregs;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t bitmap_info;
};
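
/*
 * Rough invariant (illustrative; the exact values are computed at boot and may
 * account for per-run overhead): for a bin whose regions are reg_size bytes
 * and whose runs span run_size bytes,
 *
 *   nregs ~= run_size / reg_size
 *
 * e.g. a 16 KiB run of 64-byte regions yields on the order of 256 regions.
 */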

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation is protected by the arena lock,
	 * which may be acquired while holding one or more bin locks, but not
	 * vice versa.
	 */
	malloc_mutex_t lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t *runcur;

	/*
	 * Heap of non-full runs.  This heap is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_heap_t runs;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned ind;

	/*
	 * Number of threads currently assigned to this arena, synchronized via
	 * atomic operations.  Each thread has two distinct assignments, one for
	 * application-serving allocation, and the other for internal metadata
	 * allocation.  Internal metadata must not be allocated from arenas
	 * created via the arenas.extend mallctl, because the arena.<i>.reset
	 * mallctl indiscriminately discards all allocations for the affected
	 * arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 */
	unsigned nthreads[2];

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t lock;

	arena_stats_t stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 */
	ql_head(tcache_t) tcache_ql;

	uint64_t prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
	uint64_t offset_state;

	dss_prec_t dss_prec;

	/* Extant arena chunks. */
	ql_head(extent_t) achunks;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	extent_t *spare;

	/* Minimum ratio (log base 2) of nactive:ndirty. */
	ssize_t lg_dirty_mult;

	/* True if a thread is currently executing arena_purge_to_limit(). */
	bool purging;

	/* Number of pages in active runs and huge regions. */
	size_t nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t ndirty;

	/*
	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
	 * chunks, but the list linkage is actually semi-duplicated in order to
	 * avoid extra arena_chunk_map_misc_t space overhead.
	 *
	 *   LRU-----------------------------------------------------------MRU
	 *
	 *        /-- arena ---\
	 *        |            |
	 *        |            |
	 *        |------------|                                 /-- chunk --\
	 *   ...->|chunks_cache|<------------------------------->|  /------\ |<--...
	 *        |------------|                                 |  |extent| |
	 *        |            |                                 |  |      | |
	 *        |            |     /- run -\     /- run -\     |  |      | |
	 *        |            |     |       |     |       |     |  |      | |
	 *        |            |     |       |     |       |     |  |      | |
	 *        |------------|     |-------|     |-------|     |  |------| |
	 *   ...->|runs_dirty  |<--->|rd     |<--->|rd     |<------>|rd    |<----...
	 *        |------------|     |-------|     |-------|     |  |------| |
	 *        |            |     |       |     |       |     |  |      | |
	 *        |            |     |       |     |       |     |  \------/ |
	 *        |            |     \-------/     \-------/     |           |
	 *        |            |                                 |           |
	 *        |            |                                 |           |
	 *        \------------/                                 \-----------/
	 */
	arena_runs_dirty_link_t runs_dirty;
	extent_t chunks_cache;

	/*
	 * Approximate time in seconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	ssize_t decay_time;
	/* decay_time / SMOOTHSTEP_NSTEPS. */
	nstime_t decay_interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t decay_epoch;
	/* decay_deadline randomness generator. */
	uint64_t decay_jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of decay_interval and
	 * per-epoch jitter, which is a uniform random variable in
	 * [0..decay_interval).  Epochs always advance by precise multiples of
	 * decay_interval, but we randomize the deadline to reduce the
	 * likelihood of arenas purging in lockstep.
	 */
	nstime_t decay_deadline;
	/*
	 * Number of dirty pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between decay_ndirty and ndirty to
	 * determine how many dirty pages, if any, were generated, and record
	 * the result in decay_backlog.
	 */
	size_t decay_ndirty;
	/*
	 * Memoized result of arena_decay_backlog_npages_limit() corresponding
	 * to the current contents of decay_backlog, i.e. the limit on how many
	 * pages are allowed to exist for the decay epochs.
	 */
	size_t decay_backlog_npages_limit;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to decay_epoch.
	 */
	size_t decay_backlog[SMOOTHSTEP_NSTEPS];
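
	/*
	 * Worked example (illustrative; assumes SMOOTHSTEP_NSTEPS == 200): with
	 * decay_time == 10 s, decay_interval is 50 ms.  If 300 ms elapse before
	 * the next tick, epoch advancement shifts decay_backlog by 6 slots,
	 * records ndirty - decay_ndirty in the newest slot, and recomputes
	 * decay_backlog_npages_limit from the smoothed backlog.
	 */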

	/* Extant huge allocations. */
	ql_head(extent_t) huge;
	/* Synchronizes all huge allocation/update/deallocation. */
	malloc_mutex_t huge_mtx;

	/*
	 * Heaps of chunks that were previously allocated.  These are used when
	 * allocating chunks, in an attempt to re-use address space.
	 */
	extent_heap_t chunks_cached[NPSIZES];
	extent_heap_t chunks_retained[NPSIZES];

	malloc_mutex_t chunks_mtx;
	/* Cache of extent structures that were allocated via base_alloc(). */
	ql_head(extent_t) extent_cache;
	malloc_mutex_t extent_cache_mtx;

	/* User-configurable chunk hook functions. */
	chunk_hooks_t chunk_hooks;

	/* bins is used to store trees of free regions. */
	arena_bin_t bins[NBINS];

	/*
	 * Size-segregated address-ordered heaps of this arena's available runs,
	 * used for first-best-fit run allocation.  Runs are quantized, i.e.
	 * they reside in the last heap which corresponds to a size class less
	 * than or equal to the run size.
	 */
	arena_run_heap_t runs_avail[NPSIZES];
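
	/*
	 * Lookup sketch (illustrative; the actual search lives in arena.c): a
	 * request for a run of rsize bytes is rounded up with
	 * run_quantize_ceil(rsize), mapped to the corresponding heap index, and
	 * the heaps are scanned from that index upward, taking the
	 * lowest-addressed run from the first non-empty heap.
	 */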
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

static const size_t large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;
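
/*
 * Note (illustrative inference; see the offset_state comment in arena_s): when
 * JEMALLOC_CACHE_OBLIVIOUS is defined, each large run is padded by one page so
 * that the returned base pointer can be offset by a random number of cache
 * lines, which spreads large allocations across cache index bins.  Large run
 * sizes therefore include the pad, i.e. run size == usize + large_pad.
 */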

extern purge_mode_t opt_purge;
extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
extern ssize_t opt_decay_time;

extern const arena_bin_info_t arena_bin_info[NBINS];

extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */

#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
    bool *zero);
void arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_t *extent);
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
    bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
    bool cache);
extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool locked);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);
void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t lg_dirty_mult);
ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
#endif
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
    tcache_t *tcache, bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, extent_t *extent, void *ptr,
    arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    extent_t *extent, void *ptr, size_t pageind);
bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
    size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
void arena_boot(void);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_bits_t *arena_bitselm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_misc_t *arena_miscelm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
size_t arena_miscelm_to_pageind(const extent_t *extent,
    const arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(const extent_t *extent,
    const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(const extent_t *extent,
    arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(const extent_t *extent,
    arena_run_t *run);
size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbitsp_read(const size_t *mapbitsp);
size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
    size_t pageind);
szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
    size_t flags);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr,
    size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
    const void *ptr);
void arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize, prof_tctx_t *tctx);
void arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize, const void *old_ptr, prof_tctx_t *old_tctx);
void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
    tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
    tcache_t *tcache, bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
	    (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const extent_t *extent,
    const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
	    map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(const extent_t *extent,
    const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
	size_t pageind = arena_miscelm_to_pageind(extent, miscelm);

	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_rd_to_miscelm(const extent_t *extent, arena_runs_dirty_link_t *rd)
{
	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));

	assert(arena_miscelm_to_pageind(extent, miscelm) >= map_bias);
	assert(arena_miscelm_to_pageind(extent, miscelm) < chunk_npages);

	return (miscelm);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(const extent_t *extent, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
	    *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

	assert(arena_miscelm_to_pageind(extent, miscelm) >= map_bias);
	assert(arena_miscelm_to_pageind(extent, miscelm) < chunk_npages);

	return (miscelm);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE const size_t *
arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(const size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}
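
/*
 * Usage sketch (illustrative only): per-page metadata is read in one step and
 * then examined with the typed accessors or flag masks, e.g.
 *
 *   size_t mapbits = arena_mapbits_get(chunk, pageind);
 *   if ((mapbits & CHUNK_MAP_ALLOCATED) == 0) {
 *           ... the page belongs to an unallocated run ...
 *   }
 */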

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
	size_t size;

#if CHUNK_MAP_SIZE_SHIFT > 0
	size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
	size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif

	return (size);
}
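
/*
 * Worked example (illustrative; assumes 4 KiB pages, i.e. LG_PAGE == 12):
 * CHUNK_MAP_RUNIND_SHIFT == 5 + 8 == 13, so CHUNK_MAP_SIZE_SHIFT == 13 - 12
 * == 1 and the first branch applies; a size s is encoded as s << 1 and decoded
 * as (mapbits & CHUNK_MAP_SIZE_MASK) >> 1.
 */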
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
|
2015-08-20 05:12:05 +08:00
|
|
|
return (arena_mapbits_size_decode(mapbits));
|
2012-05-02 15:30:36 +08:00
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
|
|
|
|
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
|
2015-08-20 05:12:05 +08:00
|
|
|
return (arena_mapbits_size_decode(mapbits));
|
2012-05-02 15:30:36 +08:00
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
|
|
|
|
CHUNK_MAP_ALLOCATED);
|
2015-08-05 01:49:46 +08:00
|
|
|
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
|
2012-05-02 15:30:36 +08:00
|
|
|
}
|
|
|
|
|
2015-08-20 06:21:32 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE szind_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-03 07:11:03 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
2015-08-20 06:21:32 +08:00
|
|
|
szind_t binind;
|
2012-05-03 07:11:03 +08:00
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
|
|
|
|
assert(binind < NBINS || binind == BININD_INVALID);
|
|
|
|
return (binind);
|
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
2015-08-05 01:49:46 +08:00
|
|
|
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
|
|
|
|
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
|
2012-05-02 15:30:36 +08:00
|
|
|
return (mapbits & CHUNK_MAP_DIRTY);
|
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
2015-08-05 01:49:46 +08:00
|
|
|
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
|
|
|
|
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
|
2012-05-02 15:30:36 +08:00
|
|
|
return (mapbits & CHUNK_MAP_UNZEROED);
|
|
|
|
}
|
|
|
|
|
2015-08-05 01:49:46 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
|
2015-08-05 01:49:46 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
|
|
|
|
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
|
|
|
|
return (mapbits & CHUNK_MAP_DECOMMITTED);
|
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
return (mapbits & CHUNK_MAP_LARGE);
|
|
|
|
}
|
|
|
|
|
2013-01-23 00:45:43 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE size_t
|
2016-03-24 07:04:38 +08:00
|
|
|
arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
|
2012-05-02 15:30:36 +08:00
|
|
|
{
|
|
|
|
size_t mapbits;
|
|
|
|
|
|
|
|
mapbits = arena_mapbits_get(chunk, pageind);
|
|
|
|
return (mapbits & CHUNK_MAP_ALLOCATED);
|
|
|
|
}
|
|
|
|
|
2013-10-20 12:40:20 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE void
|
|
|
|
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
|
|
|
|
{
|
|
|
|
|
|
|
|
*mapbitsp = mapbits;
|
|
|
|
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
	size_t mapbits;

#if CHUNK_MAP_SIZE_SHIFT > 0
	mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	mapbits = size;
#else
	mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif
	assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
	return (mapbits);
}
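
/*
 * The setters below construct a complete map-bits word from an encoded size, a
 * bin index, and flag bits, then store it via arena_mapbitsp_write().  A
 * minimal sketch of marking a run's first page unallocated (hypothetical
 * values, not part of this header):
 *
 *	arena_mapbits_unallocated_set(chunk, pageind, run_size,
 *	    arena_mapbits_unzeroed_get(chunk, pageind));
 */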
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    (mapbits & ~CHUNK_MAP_SIZE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
	    CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
	    large_pad);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}
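
/*
 * Metadata byte counts are tracked in arena->stats.metadata_allocated and are
 * adjusted and read with atomic operations rather than under the arena lock.
 */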
JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{

	atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{

	atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{

	return (atomic_read_z(&arena->stats.metadata_allocated));
}
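
/*
 * Profiling interval accounting: arena_prof_accum_impl() adds to the arena's
 * byte accumulator and returns true each time another prof_interval bytes have
 * accumulated, signaling the caller that a profiling interval has elapsed.
 * arena_prof_accum_locked() assumes the arena lock is already held, while
 * arena_prof_accum() acquires it itself; both short-circuit when interval
 * accounting is disabled (prof_interval == 0).
 */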
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);

	{
		bool ret;

		malloc_mutex_lock(tsdn, &arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (ret);
	}
}

# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
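
/*
 * arena_ptr_small_binind_get() extracts the bin index for a small allocation
 * directly from the supplied map bits.  In debug builds it additionally walks
 * back to the run's metadata and cross-checks that the encoded bin index, the
 * run's bin, and the pointer's alignment within the run all agree.
 */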
JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, size_t mapbits)
{
	szind_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		const extent_t *extent;
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		size_t rpages_ind;
		const arena_run_t *run;
		arena_bin_t *bin;
		szind_t run_binind, actual_binind;
		const arena_bin_info_t *bin_info;
		const arena_chunk_map_misc_t *miscelm;
		const void *rpages;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		extent = iealloc(tsdn, ptr);
		chunk = (arena_chunk_t *)extent_base_get(extent);
		arena = extent_arena_get(extent);
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
		    pageind);
		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
		run = &miscelm->run;
		run_binind = run->binind;
		bin = &arena->bins[run_binind];
		actual_binind = (szind_t)(bin - arena->bins);
		assert(run_binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		rpages = arena_miscelm_to_rpages(extent, miscelm);
		assert(((uintptr_t)ptr - (uintptr_t)rpages) % bin_info->reg_size
		    == 0);
	}

	return (binind);
}

JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind = (szind_t)(bin - arena->bins);
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
	prof_tctx_t *ret;

	cassert(config_prof);
	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t mapbits = arena_mapbits_get(chunk, pageind);
		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
			ret = (prof_tctx_t *)(uintptr_t)1U;
		else {
			arena_chunk_map_misc_t *elm =
			    arena_miscelm_get_mutable(chunk, pageind);
			ret = atomic_read_p(&elm->prof_tctx_pun);
		}
	} else
		ret = huge_prof_tctx_get(tsdn, extent);

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize, prof_tctx_t *tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;

		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

		if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
		    (uintptr_t)1U)) {
			arena_chunk_map_misc_t *elm;

			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun, tctx);
		} else {
			/*
			 * tctx must always be initialized for large runs.
			 * Assert that the surrounding conditional logic is
			 * equivalent to checking whether ptr refers to a large
			 * run.
			 */
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
		}
	} else
		huge_prof_tctx_set(tsdn, extent, tctx);
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize, const void *old_ptr, prof_tctx_t *old_tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);

	if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
	    (uintptr_t)old_tctx > (uintptr_t)1U))) {
		if (likely(extent_slab_get(extent))) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)extent_base_get(extent);
			size_t pageind;
			arena_chunk_map_misc_t *elm;

			pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun,
			    (prof_tctx_t *)(uintptr_t)1U);
		} else
			huge_prof_tctx_reset(tsdn, extent);
	}
}
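
/*
 * Decay-based purging: arena_decay_ticks() advances the calling thread's
 * per-arena decay ticker by nticks and, when the ticker fires, invokes
 * arena_purge() on that arena.  arena_decay_tick() is the single-tick
 * shorthand.
 */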
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn)))
		return;
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena->ind);
	if (unlikely(decay_ticker == NULL))
		return;
	if (unlikely(ticker_ticks(decay_ticker, nticks)))
		arena_purge(tsdn, arena, false);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{

	arena_decay_ticks(tsdn, arena, 1);
}
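
/*
 * arena_malloc() is the allocation fast path: with a thread cache available,
 * requests up to SMALL_MAXCLASS go through tcache_alloc_small() and requests
 * up to tcache_maxclass through tcache_alloc_huge(); everything else falls
 * back to arena_malloc_hard().  A minimal caller sketch (arena, tcache, and
 * size assumed to come from the caller; not part of this header):
 *
 *	szind_t ind = size2index(size);
 *	void *ret = arena_malloc(tsdn, arena, size, ind, false, tcache, true);
 */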
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path)
{

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		if (likely(size <= tcache_maxclass)) {
			return (tcache_alloc_huge(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr)
{

	return (extent_arena_get(iealloc(tsdn, ptr)));
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
	size_t ret;

	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		const arena_chunk_t *chunk =
		    (const arena_chunk_t *)extent_base_get(extent);
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		szind_t binind;

		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		binind = arena_mapbits_binind_get(chunk, pageind);
		/* Small allocation. */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(tsdn, ptr,
		    arena_mapbits_get(chunk, pageind)) == binind);
		ret = index2size(binind);
	} else
		ret = huge_salloc(tsdn, extent);

	return (ret);
}
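
/*
 * arena_dalloc() mirrors the allocation fast path: slab-backed (small)
 * allocations are returned through tcache_dalloc_small() when a thread cache
 * is available, or arena_dalloc_small() otherwise; non-slab allocations go
 * through tcache_dalloc_huge() when they fit in the cache, falling back to
 * huge_dalloc().  Profiled allocations that were promoted to a larger size
 * class are routed through arena_dalloc_promoted().
 */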
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
    bool slow_path)
{

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		/* Small allocation. */
		arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t mapbits = arena_mapbits_get(chunk, pageind);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		assert((mapbits & CHUNK_MAP_LARGE) == 0);
		if (likely(tcache != NULL)) {
			szind_t binind = arena_ptr_small_binind_get(tsdn, ptr,
			    mapbits);
			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
			    slow_path);
		} else {
			arena_dalloc_small(tsdn, extent_arena_get(extent),
			    chunk, extent, ptr, pageind);
		}
	} else {
		size_t usize = extent_usize_get(extent);

		if (likely(tcache != NULL) && usize <= tcache_maxclass) {
			if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
				arena_dalloc_promoted(tsdn, extent, ptr,
				    tcache, slow_path);
			} else {
				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
				    usize, slow_path);
			}
		} else
			huge_dalloc(tsdn, extent);
	}
}
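
/*
 * arena_sdalloc() is the sized variant of arena_dalloc(): because the caller
 * supplies the size, the small/tcache path can compute the bin index with
 * size2index() instead of reading the page's map bits.
 */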
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
    tcache_t *tcache, bool slow_path)
{

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (likely(extent_slab_get(extent))) {
		/* Small allocation. */
		if (likely(tcache != NULL)) {
			szind_t binind = size2index(size);
			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
			    slow_path);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)extent_base_get(extent);
			size_t pageind = ((uintptr_t)ptr -
			    (uintptr_t)chunk) >> LG_PAGE;
			arena_dalloc_small(tsdn, extent_arena_get(extent),
			    chunk, extent, ptr, pageind);
		}
	} else {
		if (likely(tcache != NULL) && size <= tcache_maxclass) {
			if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
				arena_dalloc_promoted(tsdn, extent, ptr,
				    tcache, slow_path);
			} else {
				tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
				    size, slow_path);
			}
		} else
			huge_dalloc(tsdn, extent);
	}
}

# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/