2017-01-11 10:06:31 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
|
|
|
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
|
|
|
|
2017-04-11 10:04:40 +08:00
|
|
|
#include "jemalloc/internal/atomic.h"
|
2017-04-21 05:32:24 +08:00
|
|
|
#include "jemalloc/internal/bitmap.h"
|
2017-04-11 08:11:33 +08:00
|
|
|
#include "jemalloc/internal/ql.h"
|
2017-04-17 13:31:16 +08:00
|
|
|
#include "jemalloc/internal/rb.h"
|
|
|
|
#include "jemalloc/internal/ph.h"
|
2017-04-20 06:09:01 +08:00
|
|
|
#include "jemalloc/internal/size_classes.h"
|
2017-04-11 07:54:25 +08:00
|
|
|
|
2017-01-30 13:57:14 +08:00
|
|
|
/*
 * Extent lifecycle states.  Decay-based purging is two-phase: dirty
 * extents are lazily purged to muzzy, then forcibly purged, decommitted,
 * or unmapped (the first phase is omitted when lazy purging is
 * unavailable).
 */
typedef enum {
	/* In use by the application. */
	extent_state_active = 0,
	/* Unused; backing pages still resident and unpurged. */
	extent_state_dirty = 1,
	/* Lazily purged; the OS may reclaim the pages at any time. */
	extent_state_muzzy = 2,
	/*
	 * Pages purged or decommitted; virtual address range retained
	 * for reuse.
	 */
	extent_state_retained = 3
} extent_state_t;
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t		e_bits;
/* Shift/mask pairs extracting each subfield from e_bits (layout above). */
#define EXTENT_BITS_ARENA_SHIFT		0
#define EXTENT_BITS_ARENA_MASK \
    (((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_SHIFT		MALLOCX_ARENA_BITS
#define EXTENT_BITS_SLAB_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_SHIFT	(MALLOCX_ARENA_BITS + 1)
#define EXTENT_BITS_COMMITTED_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_ZEROED_SHIFT	(MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_ZEROED_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)

/* Two bits: holds an extent_state_t (four states). */
#define EXTENT_BITS_STATE_SHIFT		(MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_STATE_MASK \
    ((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_SHIFT		(MALLOCX_ARENA_BITS + 5)
#define EXTENT_BITS_SZIND_MASK \
    (((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_SHIFT \
    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
#define EXTENT_BITS_NFREE_MASK \
    ((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)

/* sn occupies all remaining high bits. */
#define EXTENT_BITS_SN_SHIFT \
    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
#define EXTENT_BITS_SN_MASK	(UINT64_MAX << EXTENT_BITS_SN_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different than the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t			e_size_esn;
/* Size is page-aligned, so the low lg(PAGE) bits are free to carry esn. */
#define EXTENT_SIZE_MASK	((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK		((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t			e_bsize;
	};

	union {
		/*
		 * List linkage, used by a variety of lists:
		 * - arena_bin_t's slabs_full
		 * - extents_t's LRU
		 * - stashed dirty extents
		 * - arena's large allocations
		 */
		ql_elm(extent_t)	ql_link;
		/* Red-black tree linkage, used by arena's extent_avail. */
		rb_node(extent_t)	rb_link;
	};

	/* Linkage for per size class sn/address-ordered heaps. */
	phn(extent_t)		ph_link;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t	e_slab_data;

		/*
		 * Profile counters, used for large objects.  Points to a
		 * prof_tctx_t.
		 */
		atomic_p_t		e_prof_tctx;
	};
};
|
2017-01-30 13:57:14 +08:00
|
|
|
/*
 * Container types keyed on the corresponding linkage fields embedded in
 * extent_t (ql_link, rb_link, ph_link).
 */
typedef ql_head(extent_t) extent_list_t;
typedef rb_tree(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
|
|
|
|
|
2017-01-30 13:57:14 +08:00
|
|
|
/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	/* Protects all fields below except npages reads (see npages). */
	malloc_mutex_t		mtx;

	/*
	 * Quantized per size class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t		heaps[NPSIZES+1];

	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t		bitmap[BITMAP_GROUPS(NPSIZES+1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t		lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to npages
	 * must hold mtx, but reads need not (though, a reader who sees npages
	 * without holding the mutex can't assume anything about the rest of the
	 * state of the extents_t).
	 */
	atomic_zu_t		npages;

	/* All stored extents must be in the same state. */
	extent_state_t		state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool			delay_coalesce;
};
|
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
|