#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"

/*
 * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
 * to free up the low bits in the rtree leaf.
 */
#define EDATA_ALIGNMENT 128
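/*
 * (Since lg(128) == 7, the low 7 bits of a correctly aligned edata_t address
 * are always zero; those are the low bits the rtree leaf frees up, per the
 * comment above.)
 */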

enum extent_state_e {
        extent_state_active = 0,
        extent_state_dirty = 1,
        extent_state_muzzy = 2,
        extent_state_retained = 3,
        extent_state_transition = 4, /* States below are intermediate. */
        extent_state_merging = 5,
        extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;

enum extent_head_state_e {
        EXTENT_NOT_HEAD,
        EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;

/*
 * Which implementation of the page allocator interface (PAI, defined in
 * pai.h) owns the given extent?
 */
enum extent_pai_e {
        EXTENT_PAI_PAC = 0,
        EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;

struct e_prof_info_s {
        /* Time when this was allocated. */
        nstime_t e_prof_alloc_time;
        /* Allocation request size. */
        size_t e_prof_alloc_size;
        /* Points to a prof_tctx_t. */
        atomic_p_t e_prof_tctx;
        /*
         * Points to a prof_recent_t for the allocation; NULL
         * means the recent allocation record no longer exists.
         * Protected by prof_recent_alloc_mtx.
         */
        atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;

/*
 * The information about a particular edata that lives in an emap. Space is
 * more precious there (the information, plus the edata pointer, has to live in
 * a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access. You have one fewer pointer hop, since finding the
 *   edata_t associated with an item always requires accessing the rtree leaf in
 *   which this data is stored.
 * - It can be read unsynchronized, and without worrying about lifetime issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
        bool slab;
        szind_t szind;
};

typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
        uint64_t sn;
        uintptr_t addr;
};

/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
        /*
         * Bitfield containing several fields:
         *
         * a: arena_ind
         * b: slab
         * c: committed
         * p: pai
         * z: zeroed
         * g: guarded
         * t: state
         * i: szind
         * f: nfree
         * s: bin_shard
         *
         * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
         *
         * arena_ind: Arena from which this extent came, or all 1 bits if
         *   unassociated.
         *
         * slab: The slab flag indicates whether the extent is used for a slab
         *   of small regions. This helps differentiate small size classes,
         *   and it indicates whether interior pointers can be looked up via
         *   iealloc().
         *
         * committed: The committed flag indicates whether physical memory is
         *   committed to the extent, whether explicitly or implicitly
         *   as on a system that overcommits and satisfies physical
         *   memory needs on demand via soft page faults.
         *
         * pai: The pai flag is an extent_pai_t.
         *
         * zeroed: The zeroed flag is used by extent recycling code to track
         *   whether memory is zero-filled.
         *
         * guarded: The guarded flag is used by the sanitizer to track whether
         *   the extent has page guards around it.
         *
         * state: The state flag is an extent_state_t.
         *
         * szind: The szind flag indicates usable size class index for
         *   allocations residing in this extent, regardless of whether the
         *   extent is a slab. Extent size and usable size often differ
         *   even for non-slabs, either due to sz_large_pad or promotion of
         *   sampled small regions.
         *
         * nfree: Number of free regions in slab.
         *
         * bin_shard: The shard of the bin from which this extent came.
         */
        uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)

#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
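
/*
 * For example, MASK(3, 4) expands to ((((uint64_t)0x1U << 3) - 1) << 4), i.e.
 * 0x70: a 3-bit-wide field stored at bit offset 4. Each EDATA_BITS_*_MASK
 * above is built the same way from its field's width and shift, and the
 * accessors below read a field as (e_bits & MASK) >> SHIFT and write it as
 * (e_bits & ~MASK) | ((uint64_t)value << SHIFT).
 */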

        /* Pointer to the extent that this structure is responsible for. */
        void *e_addr;

        union {
                /*
                 * Extent size and serial number associated with the extent
                 * structure (different than the serial number for the extent at
                 * e_addr).
                 *
                 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
                 */
                size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
                /* Base extent size, which may not be a multiple of PAGE. */
                size_t e_bsize;
        };
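
        /*
         * For example, with 4 KiB pages (PAGE == 0x1000), EDATA_SIZE_MASK
         * keeps the page-aligned size in the high bits and EDATA_ESN_MASK
         * keeps the extent serial number in the low 12 bits: e_size_esn ==
         * 0x15003 encodes a size of 0x15000 bytes and an esn of 3. (The
         * exact split depends on the configured page size.)
         */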

        /*
         * If this edata is a user allocation from an HPA, it comes out of some
         * pageslab (we don't yet support hugepage allocations that don't fit
         * into pageslabs). This tracks it.
         */
        hpdata_t *e_ps;

        /*
         * Serial number. These are not necessarily unique; splitting an extent
         * results in two extents with the same serial number.
         */
        uint64_t e_sn;

        union {
                /*
                 * List linkage used when the edata_t is active; either in
                 * arena's large allocations or bin_t's slabs_full.
                 */
                ql_elm(edata_t) ql_link_active;
                /*
                 * Pairing heap linkage. Used whenever the extent is inactive
                 * (in the page allocators), or when it is active and in
                 * slabs_nonfull, or when the edata_t is unassociated with an
                 * extent and sitting in an edata_cache.
                 */
                union {
                        edata_heap_link_t heap_link;
                        edata_avail_link_t avail_link;
                };
        };

        union {
                /*
                 * List linkage used when the extent is inactive:
                 * - Stashed dirty extents
                 * - Ecache LRU functionality.
                 */
                ql_elm(edata_t) ql_link_inactive;
                /* Small region slab metadata. */
                slab_data_t e_slab_data;

                /* Profiling data, used for large objects. */
                e_prof_info_t e_prof_info;
        };
};

TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)

static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
        unsigned arena_ind = (unsigned)((edata->e_bits &
            EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
        assert(arena_ind < MALLOCX_ARENA_LIMIT);

        return arena_ind;
}

static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
        szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
            EDATA_BITS_SZIND_SHIFT);
        assert(szind <= SC_NSIZES);
        return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
        szind_t szind = edata_szind_get_maybe_invalid(edata);
        assert(szind < SC_NSIZES); /* Never call when "invalid". */
        return szind;
}

static inline size_t
edata_usize_get(const edata_t *edata) {
        return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
        unsigned binshard = (unsigned)((edata->e_bits &
            EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
        assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
        return binshard;
}

static inline uint64_t
edata_sn_get(const edata_t *edata) {
        return edata->e_sn;
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
        return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
            EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_guarded_get(const edata_t *edata) {
        return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
            EDATA_BITS_GUARDED_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
        return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
            EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
        return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
            EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
        return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
            EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
        return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
            EDATA_BITS_SLAB_SHIFT);
}

static inline unsigned
edata_nfree_get(const edata_t *edata) {
        assert(edata_slab_get(edata));
        return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
            EDATA_BITS_NFREE_SHIFT);
}

static inline void *
edata_base_get(const edata_t *edata) {
        assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
            !edata_slab_get(edata));
        return PAGE_ADDR2BASE(edata->e_addr);
}

static inline void *
edata_addr_get(const edata_t *edata) {
        assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
            !edata_slab_get(edata));
        return edata->e_addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
        return (edata->e_size_esn & EDATA_SIZE_MASK);
}

static inline size_t
edata_esn_get(const edata_t *edata) {
        return (edata->e_size_esn & EDATA_ESN_MASK);
}

static inline size_t
edata_bsize_get(const edata_t *edata) {
        return edata->e_bsize;
}

static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
        assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
        return edata->e_ps;
}

static inline void *
edata_before_get(const edata_t *edata) {
        return (void *)((byte_t *)edata_base_get(edata) - PAGE);
}

static inline void *
edata_last_get(const edata_t *edata) {
        return (void *)((byte_t *)edata_base_get(edata) +
            edata_size_get(edata) - PAGE);
}

static inline void *
edata_past_get(const edata_t *edata) {
        return (void *)((byte_t *)edata_base_get(edata) +
            edata_size_get(edata));
}

static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
        assert(edata_slab_get(edata));
        return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
        assert(edata_slab_get(edata));
        return &edata->e_slab_data;
}

static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
        return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
            ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
        return &edata->e_prof_info.e_prof_alloc_time;
}

static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
        return edata->e_prof_info.e_prof_alloc_size;
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
        return (prof_recent_t *)atomic_load_p(
            &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}

static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
            ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
        /* The assertion assumes szind is set already. */
        assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
            ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
        edata->e_addr = addr;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
        assert((size & ~EDATA_SIZE_MASK) == 0);
        edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
        edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
            EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
        edata->e_bsize = bsize;
}

static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
        assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
        edata->e_ps = ps;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
        assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
            ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
        assert(edata_slab_get(edata));
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
            ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
        /* The assertion assumes szind is set already. */
        assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
        edata->e_bits = (edata->e_bits &
            (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
            ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
            ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_inc(edata_t *edata) {
        assert(edata_slab_get(edata));
        edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
        assert(edata_slab_get(edata));
        edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
        assert(edata_slab_get(edata));
        edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
        edata->e_sn = sn;
}

static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
            ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
            ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
            ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
            ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
            ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
            ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
        atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
        nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
        edata->e_prof_info.e_prof_alloc_size = size;
}

static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
        atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
            ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
        return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
            EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
            ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

static inline bool
edata_state_in_transition(extent_state_t state) {
        return state >= extent_state_transition;
}
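
/*
 * For example, given the extent_state_e values defined above,
 * edata_state_in_transition(extent_state_merging) is true, while
 * edata_state_in_transition(extent_state_dirty) is false.
 */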

/*
 * Because this function is implemented as a sequence of bitfield modifications,
 * even though each individual bit is properly initialized, we technically read
 * uninitialized data within it. This is mostly fine, since most callers get
 * their edatas from zeroing sources, but callers who make stack edata_ts need
 * to manually zero them.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
        assert(addr == PAGE_ADDR2BASE(addr) || !slab);

        edata_arena_ind_set(edata, arena_ind);
        edata_addr_set(edata, addr);
        edata_size_set(edata, size);
        edata_slab_set(edata, slab);
        edata_szind_set(edata, szind);
        edata_sn_set(edata, sn);
        edata_state_set(edata, state);
        edata_guarded_set(edata, false);
        edata_zeroed_set(edata, zeroed);
        edata_committed_set(edata, committed);
        edata_pai_set(edata, pai);
        edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
        if (config_prof) {
                edata_prof_tctx_set(edata, NULL);
        }
}
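
/*
 * Illustrative example of the note above: a caller building a temporary
 * edata_t on the stack must zero it before calling edata_init, e.g.
 *
 *     edata_t tmp;
 *     memset(&tmp, 0, sizeof(tmp));
 *     edata_init(&tmp, arena_ind, addr, size, false, SC_NSIZES, sn,
 *         extent_state_active, false, true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 *
 * Here arena_ind, addr, size, and sn stand in for caller-supplied values, and
 * the remaining arguments are just one plausible combination.
 */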

static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn,
    bool reused) {
        edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
        edata_addr_set(edata, addr);
        edata_bsize_set(edata, bsize);
        edata_slab_set(edata, false);
        edata_szind_set(edata, SC_NSIZES);
        edata_sn_set(edata, sn);
        edata_state_set(edata, extent_state_active);
        /* See comments in base_edata_is_reused. */
        edata_guarded_set(edata, reused);
        edata_zeroed_set(edata, true);
        edata_committed_set(edata, true);
        /*
         * This isn't strictly true, but base allocated extents never get
         * deallocated and can't be looked up in the emap, so there's no sense
         * in wasting a state bit to encode this fact.
         */
        edata_pai_set(edata, EXTENT_PAI_PAC);
}

static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
        size_t a_esn = edata_esn_get(a);
        size_t b_esn = edata_esn_get(b);

        return (a_esn > b_esn) - (a_esn < b_esn);
}

static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
        uintptr_t a_eaddr = (uintptr_t)a;
        uintptr_t b_eaddr = (uintptr_t)b;

        return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
        edata_cmp_summary_t result;
        result.sn = edata_sn_get(edata);
        result.addr = (uintptr_t)edata_addr_get(edata);
        return result;
}

static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
        /*
         * Logically, what we're doing here is comparing based on `.sn`, and
         * falling back to comparing on `.addr` in the case that `a.sn == b.sn`.
         * We accomplish this by multiplying the result of the `.sn` comparison
         * by 2, so that so long as it is not 0, it will dominate the `.addr`
         * comparison in determining the sign of the returned result value.
         * The justification for doing things this way is that this is
         * branchless - all of the branches that would be present in a
         * straightforward implementation are common cases, and thus the branch
         * prediction accuracy is not great. As a result, this implementation
         * is measurably faster (by around 30%).
         */
        return (2 * ((a.sn > b.sn) - (a.sn < b.sn))) +
            ((a.addr > b.addr) - (a.addr < b.addr));
}
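
/*
 * For example, if a.sn < b.sn, the sn term above contributes -2, so the sum is
 * negative regardless of the address term (which is in {-1, 0, 1}); the
 * address comparison only decides the result when the serial numbers are
 * equal.
 */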

static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
        edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
        edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);

        return edata_cmp_summary_comp(a_cmp, b_cmp);
}

static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
        /*
         * Similar to `edata_cmp_summary_comp`, we've opted for a
         * branchless implementation for the sake of performance.
         */
        return (2 * edata_esn_comp(a, b)) + edata_ead_comp(a, b);
}

ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)

#endif /* JEMALLOC_INTERNAL_EDATA_H */