#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
struct arena_bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};
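
/*
 * Illustrative sketch (not part of the original header; assumes <assert.h>
 * and <stdint.h>): with the layout above, the address of a region within a
 * slab follows directly from reg_size, and nregs ties reg_size to slab_size.
 * The slab_addr parameter is a hypothetical stand-in for the slab's base
 * address.
 */
static inline void *
arena_slab_reg_addr_sketch(void *slab_addr,
    const struct arena_bin_info_s *bin_info, uint32_t reg_ind) {
	assert(reg_ind < bin_info->nregs);
	/* All nregs regions must fit within the slab. */
	assert((size_t)bin_info->nregs * bin_info->reg_size <=
	    bin_info->slab_size);
	return (void *)((uintptr_t)slab_addr +
	    (uintptr_t)reg_ind * (uintptr_t)bin_info->reg_size);
}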

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in seconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	decay_stats_t		*stats;
};
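
/*
 * Sketch (not part of the original header) of the epoch-advancement logic the
 * comments above describe, under simplifying assumptions: times are plain
 * uint64_t nanoseconds rather than nstime_t, <string.h> is available, and
 * npages_new is the delta between the current dirty page count and nunpurged,
 * computed by the caller.  Note how completely skipped epochs age entries out
 * of the backlog, and how all new activity is merged into the most recent
 * epoch.
 */
static inline void
decay_epoch_advance_sketch(uint64_t *epoch_ns, uint64_t interval_ns,
    uint64_t now_ns, size_t backlog[SMOOTHSTEP_NSTEPS], size_t npages_new) {
	uint64_t nadvance = (now_ns - *epoch_ns) / interval_ns;
	if (nadvance == 0) {
		return;	/* Deadline not yet reached; nothing to merge. */
	}
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		/* Every previously recorded epoch has aged out of the log. */
		memset(backlog, 0, (SMOOTHSTEP_NSTEPS - 1) * sizeof(size_t));
	} else {
		/* Drop the oldest nadvance entries and zero-fill the gap. */
		memmove(backlog, &backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		memset(&backlog[SMOOTHSTEP_NSTEPS - nadvance], 0,
		    (nadvance - 1) * sizeof(size_t));
	}
	/* Merge all dirty pages generated since the last recorded epoch. */
	backlog[SMOOTHSTEP_NSTEPS - 1] = npages_new;
	/* Epochs advance by precise multiples of interval. */
	*epoch_ns += nadvance * interval_ns;
}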

struct arena_bin_s {
	/* All operations on arena_bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t		*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t		slabs_full;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};
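
/*
 * Sketch (not part of the original header) of the slabcur invariant described
 * above: a full current slab must be parked in slabs_full before slabcur is
 * reassigned, and refills prefer the oldest/lowest non-full slab.
 * extent_list_append() and extent_heap_remove_first() stand in for the actual
 * jemalloc helpers.
 */
static inline void
arena_bin_slabcur_refill_sketch(struct arena_bin_s *bin) {
	/* Caller owns bin->lock, and slabcur is known to be full. */
	if (bin->slabcur != NULL) {
		/* The previous current slab must remain tracked. */
		extent_list_append(&bin->slabs_full, bin->slabcur);
	}
	/* May be NULL, in which case a new slab must be allocated. */
	bin->slabcur = extent_heap_remove_first(&bin->slabs_nonfull);
}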

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	unsigned		nthreads[2];

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t			*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)	tcache_ql;
	malloc_mutex_t		tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	size_t			offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	size_t			extent_sn_next;

	/* Synchronization: atomic. */
	dss_prec_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	size_t			nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if !config_munmap).  This limits
	 * the number of disjoint virtual memory ranges so that extent merging
	 * can be effective even if multiple arenas' extent allocation requests
	 * are highly interleaved.
	 *
	 * Synchronization: atomic.
	 */
	pszind_t		extent_grow_next;

	/*
	 * Freelist of extent structures that were allocated via base_alloc().
	 *
	 * Synchronization: extent_freelist_mtx.
	 */
	extent_list_t		extent_freelist;
	malloc_mutex_t		extent_freelist_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	arena_bin_t		bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
};
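
/*
 * Sketch (not part of the original header) tying arena_decay_t.nunpurged to
 * the arena's extents, as the decay comments describe: during epoch
 * advancement, the number of dirty pages generated since the previous epoch
 * is the delta between the current page count and the count recorded at that
 * epoch.  extents_npages_get() is the accessor those comments reference; the
 * clamp to zero is an assumption for when purging has shrunk the count.
 */
static inline size_t
arena_decay_npages_new_sketch(struct arena_s *arena) {
	size_t current_npages = extents_npages_get(&arena->extents_dirty);
	size_t nunpurged = arena->decay_dirty.nunpurged;
	return (current_npages > nunpurged) ? current_npages - nunpurged : 0;
}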

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};
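
/*
 * Sketch (not part of the original header) of how a per-thread decay ticker
 * like the one above is typically consulted: hot paths tick it, and only when
 * it fires is the decay machinery run.  ticker_tick() is the jemalloc ticker
 * API; the empty branch marks where purging would be driven.
 */
static inline void
arena_tdata_decay_tick_sketch(struct arena_tdata_s *tdata) {
	if (ticker_tick(&tdata->decay_ticker)) {
		/* Ticker fired: drive decay-based purging for the arena. */
	}
}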

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */