#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"

typedef enum {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3
} extent_state_t;
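
/*
 * Illustrative sketch, not part of jemalloc: decay-based purging moves
 * pages through these states in two phases -- lazy purging converts dirty
 * pages to muzzy, and forced purging, decommit, or unmapping converts
 * pages to clean or destroys them altogether.  The helper below is a
 * hypothetical name table, e.g. for debug logging.
 */
static inline const char *
extent_state_name(extent_state_t state) {
	switch (state) {
	case extent_state_active:   return "active";
	case extent_state_dirty:    return "dirty";
	case extent_state_muzzy:    return "muzzy";
	case extent_state_retained: return "retained";
	default:                    return "unknown";
	}
}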

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * d: dumpable
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * dumpable: The dumpable flag indicates whether or not we've set the
	 *           memory in question to be dumpable.  Note that this
	 *           interacts somewhat subtly with user-specified extent hooks,
	 *           since we don't know if *they* are fiddling with
	 *           dumpability (in which case, we don't want to undo whatever
	 *           they're doing).  To deal with this scenario, we:
	 *           - Make dumpable false only for memory allocated with the
	 *             default hooks.
	 *           - Only allow memory to go from non-dumpable to dumpable,
	 *             and only once.
	 *           - Never make the OS call to allow dumping when the
	 *             dumpable bit is already set.
	 *           These three constraints mean that we will never
	 *           accidentally dump user memory that the user meant to set
	 *           nondumpable with their extent hooks.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * bin_shard: The shard of the bin from which this extent came.
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH 1
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH 1
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH 1
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH 1
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH 2
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_BINSHARD_WIDTH 6
#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)

#define EXTENT_BITS_IS_HEAD_WIDTH 1
#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)

#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
	void *e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different than the serial number for the extent
		 * at e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t e_bsize;
	};

	/*
	 * List linkage, used by a variety of lists:
	 * - bin_t's slabs_full
	 * - extents_t's LRU
	 * - stashed dirty extents
	 * - arena's large allocations
	 */
	ql_elm(extent_t) ql_link;

	/*
	 * Linkage for per size class sn/address-ordered heaps, and for
	 * extent_avail.
	 */
	phn(extent_t) ph_link;

	union {
		/* Small region slab metadata. */
		slab_data_t e_slab_data;

		/* Profiling data, used for large objects. */
		struct {
			/* Time when this was allocated. */
			nstime_t e_alloc_time;
			/* Points to a prof_tctx_t. */
			atomic_p_t e_prof_tctx;
		};
	};
};
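
/*
 * Illustrative sketch, not part of jemalloc (the real accessors live in
 * extent_inlines.h): each e_bits field is read by masking and shifting,
 * and written by clearing the field's bits and OR-ing in the shifted new
 * value.  The function names below are hypothetical; szind_t is the size
 * class index type defined elsewhere in the internal headers.
 */
static inline szind_t
extent_szind_get_sketch(const extent_t *extent) {
	return (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
	    EXTENT_BITS_SZIND_SHIFT);
}

static inline void
extent_szind_set_sketch(extent_t *extent, szind_t szind) {
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
	    ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}

/*
 * The e_size_esn union member packs its two values the same way, except at
 * page granularity: size = e_size_esn & EXTENT_SIZE_MASK, and
 * esn = e_size_esn & EXTENT_ESN_MASK.
 */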

typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	malloc_mutex_t mtx;

	/*
	 * Quantized per size class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t heaps[SC_NPSIZES + 1];
	atomic_zu_t nextents[SC_NPSIZES + 1];
	atomic_zu_t nbytes[SC_NPSIZES + 1];

	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to
	 * npages must hold mtx, but reads need not (though a reader who sees
	 * npages without holding the mutex can't assume anything about the
	 * rest of the state of the extents_t).
	 */
	atomic_zu_t npages;

	/* All stored extents must be in the same state. */
	extent_state_t state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool delay_coalesce;
};
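
/*
 * Illustrative sketch, not part of jemalloc: the bitmap above lets a
 * best-fit search skip empty heaps instead of probing every size class.
 * Simplified here to a single word and a GCC/Clang builtin (and assuming
 * min_pind < 64); the real search in src/extent.c uses the bitmap_t API
 * from bitmap.h.
 */
static inline size_t
extents_first_nonempty_sketch(uint64_t nonempty_bits, size_t min_pind) {
	/* Mask off heaps whose size class is too small. */
	uint64_t candidates = nonempty_bits & (~(uint64_t)0 << min_pind);
	if (candidates == 0) {
		return SIZE_MAX; /* No non-empty heap at pind >= min_pind. */
	}
	/* Index of the lowest set bit, i.e. the smallest usable heap. */
	return (size_t)__builtin_ctzll(candidates);
}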

/*
 * The following two structs are for experimental purposes.  See
 * experimental_utilization_query_ctl and
 * experimental_utilization_batch_query_ctl in src/ctl.c.
 */
struct extent_util_stats_s {
	size_t nfree;
	size_t nregs;
	size_t size;
};

struct extent_util_stats_verbose_s {
	void *slabcur_addr;
	size_t nfree;
	size_t nregs;
	size_t size;
	size_t bin_nfree;
	size_t bin_nregs;
};
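
/*
 * Illustrative usage sketch, not part of jemalloc: these stats are filled
 * in response to the experimental mallctl queries named above.  The exact
 * argument contract is defined by experimental_utilization_query_ctl in
 * src/ctl.c, so treat the shape below as an assumption to verify against
 * that code:
 *
 *	extent_util_stats_verbose_t stats;
 *	size_t sz = sizeof(stats);
 *	void *p = malloc(42);
 *	mallctl("experimental.utilization.query", &stats, &sz, &p, sizeof(p));
 *	// stats.nfree/stats.nregs/stats.size then describe the extent
 *	// backing p.
 */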
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */