#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"

enum extent_state_e {
    extent_state_active   = 0,
    extent_state_dirty    = 1,
    extent_state_muzzy    = 2,
    extent_state_retained = 3
};
typedef enum extent_state_e extent_state_t;

enum extent_head_state_e {
    EXTENT_NOT_HEAD,
    EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
};
typedef enum extent_head_state_e extent_head_state_t;

/*
 * Which implementation of the page allocator interface (PAI, defined in
 * pai.h) owns the given extent?
 */
enum extent_pai_e {
    EXTENT_PAI_PAC = 0,
    EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;

struct e_prof_info_s {
    /* Time when this was allocated. */
    nstime_t e_prof_alloc_time;
    /* Points to a prof_tctx_t. */
    atomic_p_t e_prof_tctx;
    /*
     * Points to a prof_recent_t for the allocation; NULL
     * means the recent allocation record no longer exists.
     * Protected by prof_recent_alloc_mtx.
     */
    atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;

/*
 * The information about a particular edata that lives in an emap.  Space is
 * more precious there (the information, plus the edata pointer, has to live
 * in a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access.  You have one fewer pointer hop, since finding
 *   the edata_t associated with an item always requires accessing the rtree
 *   leaf in which this data is stored.
 * - It can be read unsynchronized, and without worrying about lifetime
 *   issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
    bool slab;
    szind_t szind;
};

/* Extent (span of pages).  Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
typedef ph(edata_t) edata_avail_t;
typedef ph(edata_t) edata_heap_t;
typedef ph(edata_t) edata_age_heap_t;

struct edata_s {
    /*
     * Bitfield containing several fields:
     *
     * a: arena_ind
     * b: slab
     * c: committed
     * p: pai
     * z: zeroed
     * t: state
     * i: szind
     * f: nfree
     * s: bin_shard
     * n: sn
     *
     * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zpcbaaaa aaaaaaaa
     *
     * arena_ind: Arena from which this extent came, or all 1 bits if
     *            unassociated.
     *
     * slab: The slab flag indicates whether the extent is used for a slab
     *       of small regions.  This helps differentiate small size classes,
     *       and it indicates whether interior pointers can be looked up via
     *       iealloc().
     *
     * committed: The committed flag indicates whether physical memory is
     *            committed to the extent, whether explicitly or implicitly
     *            as on a system that overcommits and satisfies physical
     *            memory needs on demand via soft page faults.
     *
     * pai: The pai flag is an extent_pai_t.
     *
     * zeroed: The zeroed flag is used by extent recycling code to track
     *         whether memory is zero-filled.
     *
     * state: The state flag is an extent_state_t.
     *
     * szind: The szind flag indicates usable size class index for
     *        allocations residing in this extent, regardless of whether the
     *        extent is a slab.  Extent size and usable size often differ
     *        even for non-slabs, either due to sz_large_pad or promotion of
     *        sampled small regions.
     *
     * nfree: Number of free regions in slab.
     *
     * bin_shard: the shard of the bin from which this extent came.
     *
     * sn: Serial number (potentially non-unique).
     *
     *     Serial numbers may wrap around if !opt_retain, but as long as
     *     comparison functions fall back on address comparison for equal
     *     serial numbers, stable (if imperfect) ordering is maintained.
     *
     *     Serial numbers may not be unique even in the absence of
     *     wrap-around, e.g. when splitting an extent and assigning the same
     *     serial number to both resulting adjacent extents.
     */
    uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EDATA_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT  0
#define EDATA_BITS_ARENA_MASK  MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH  1
#define EDATA_BITS_SLAB_SHIFT  (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK  MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH  1
#define EDATA_BITS_COMMITTED_SHIFT  (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK  MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH  1
#define EDATA_BITS_PAI_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK  MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH  1
#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_STATE_WIDTH  2
#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_STATE_MASK  MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT  (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK  MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH  (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT  (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK  MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH  6
#define EDATA_BITS_BINSHARD_SHIFT  (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK  MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH  1
#define EDATA_BITS_IS_HEAD_SHIFT  (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK  MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)

#define EDATA_BITS_SN_SHIFT  (EDATA_BITS_IS_HEAD_WIDTH + EDATA_BITS_IS_HEAD_SHIFT)
#define EDATA_BITS_SN_MASK  (UINT64_MAX << EDATA_BITS_SN_SHIFT)
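
    /*
     * Illustrative sketch (not part of the header): the accessors below all
     * follow the same extract/pack pattern on e_bits.  For a field with a
     * given WIDTH/SHIFT pair, e.g. the 2-bit state field:
     *
     *   extract: (extent_state_t)((e_bits & EDATA_BITS_STATE_MASK) >>
     *       EDATA_BITS_STATE_SHIFT)
     *   pack:    e_bits = (e_bits & ~EDATA_BITS_STATE_MASK) |
     *       ((uint64_t)state << EDATA_BITS_STATE_SHIFT)
     *
     * MASK(WIDTH, SHIFT) builds the corresponding bitmask; e.g. MASK(2, 5)
     * expands to ((((uint64_t)0x1U << 2) - 1) << 5) == 0x60.
     */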

    /* Pointer to the extent that this structure is responsible for. */
    void *e_addr;

    union {
        /*
         * Extent size and serial number associated with the extent
         * structure (different than the serial number for the extent at
         * e_addr).
         *
         * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
         */
        size_t e_size_esn;
#define EDATA_SIZE_MASK  ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK  ((size_t)PAGE-1)
        /* Base extent size, which may not be a multiple of PAGE. */
        size_t e_bsize;
    };
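
    /*
     * Illustrative sketch (not part of the header): sizes stored in
     * e_size_esn are page-aligned (edata_size_set() asserts this), so the
     * low bits below PAGE are free to carry the esn, the serial number
     * associated with this edata_t structure:
     *
     *   e_size_esn = (size & EDATA_SIZE_MASK) | (esn & EDATA_ESN_MASK);
     *   size       = e_size_esn & EDATA_SIZE_MASK;
     *   esn        = e_size_esn & EDATA_ESN_MASK;
     *
     * The two values therefore never interfere with each other.
     */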

    /*
     * In some context-specific sense, the age of an active extent.  Each
     * context can pick a specific meaning, and share the definition of the
     * edata_age_heap_t below.
     */
    uint64_t age;

    union {
        /*
         * We could steal a low bit from these fields to indicate what
         * sort of "thing" this is (a page slab, an object within a page
         * slab, or a non-pageslab range).  We don't do this yet, but it
         * would enable some extra asserts.
         */

        /*
         * If this edata is from an HPA, it may be part of some larger
         * pageslab.  Track it if so.  Otherwise (either because it's not
         * part of a pageslab, or not from the HPA at all), NULL.
         */
        edata_t *ps;

        /*
         * If this edata *is* a pageslab, then we cache some useful
         * information about its associated bitmap.
         */
        struct {
            /*
             * The longest free range a pageslab contains determines
             * the heap it lives in.  If we know that it didn't
             * change after an operation, we can avoid moving it
             * between heaps.
             */
            uint32_t longest_free_range;
        };
    };

    union {
        /*
         * List linkage used when the edata_t is active; either in
         * arena's large allocations or bin_t's slabs_full.
         */
        ql_elm(edata_t) ql_link_active;
        /*
         * Pairing heap linkage.  Used whenever the extent is inactive
         * (in the page allocators), or when it is active and in
         * slabs_nonfull, or when the edata_t is unassociated with an
         * extent and sitting in an edata_cache.
         */
        phn(edata_t) ph_link;
    };

    union {
        /*
         * List linkage used when the extent is inactive:
         * - Stashed dirty extents
         * - Ecache LRU functionality.
         */
        ql_elm(edata_t) ql_link_inactive;
        /* Small region slab metadata. */
        slab_data_t e_slab_data;
        /* Profiling data, used for large objects. */
        e_prof_info_t e_prof_info;
    };
};

TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)

static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
    unsigned arena_ind = (unsigned)((edata->e_bits & EDATA_BITS_ARENA_MASK) >>
        EDATA_BITS_ARENA_SHIFT);
    assert(arena_ind < MALLOCX_ARENA_LIMIT);
    return arena_ind;
}

static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
    szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
        EDATA_BITS_SZIND_SHIFT);
    assert(szind <= SC_NSIZES);
    return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
    szind_t szind = edata_szind_get_maybe_invalid(edata);
    assert(szind < SC_NSIZES); /* Never call when "invalid". */
    return szind;
}

static inline size_t
edata_usize_get(const edata_t *edata) {
    return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
    unsigned binshard = (unsigned)((edata->e_bits & EDATA_BITS_BINSHARD_MASK)
        >> EDATA_BITS_BINSHARD_SHIFT);
    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
    return binshard;
}

static inline size_t
edata_sn_get(const edata_t *edata) {
    return (size_t)((edata->e_bits & EDATA_BITS_SN_MASK) >>
        EDATA_BITS_SN_SHIFT);
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
    return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
        EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
    return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
        EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
    return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
        EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
    return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
        EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
    return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
        EDATA_BITS_SLAB_SHIFT);
}

static inline unsigned
edata_nfree_get(const edata_t *edata) {
    assert(edata_slab_get(edata));
    return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
        EDATA_BITS_NFREE_SHIFT);
}

static inline void *
edata_base_get(const edata_t *edata) {
    assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
        !edata_slab_get(edata));
    return PAGE_ADDR2BASE(edata->e_addr);
}

static inline void *
edata_addr_get(const edata_t *edata) {
    assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
        !edata_slab_get(edata));
    return edata->e_addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
    return (edata->e_size_esn & EDATA_SIZE_MASK);
}

static inline size_t
edata_esn_get(const edata_t *edata) {
    return (edata->e_size_esn & EDATA_ESN_MASK);
}

static inline size_t
edata_bsize_get(const edata_t *edata) {
    return edata->e_bsize;
}

static inline uint64_t
edata_age_get(const edata_t *edata) {
    return edata->age;
}

static inline edata_t *
edata_ps_get(const edata_t *edata) {
    assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
    return edata->ps;
}

static inline uint32_t
edata_longest_free_range_get(const edata_t *edata) {
    assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
    return edata->longest_free_range;
}

static inline void *
edata_before_get(const edata_t *edata) {
    return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}

static inline void *
edata_last_get(const edata_t *edata) {
    return (void *)((uintptr_t)edata_base_get(edata) +
        edata_size_get(edata) - PAGE);
}

static inline void *
edata_past_get(const edata_t *edata) {
    return (void *)((uintptr_t)edata_base_get(edata) +
        edata_size_get(edata));
}
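
/*
 * Illustrative sketch (not part of the header): the address helpers above
 * are related by simple page arithmetic.  For an extent covering
 * [base, base + size):
 *
 *   edata_base_get(e)   == PAGE_ADDR2BASE(edata_addr_get(e))
 *   edata_past_get(e)   == base + size   (first byte past the extent)
 *   edata_last_get(e)   == past - PAGE   (start of the last page)
 *   edata_before_get(e) == base - PAGE   (start of the preceding page)
 */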

static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
    assert(edata_slab_get(edata));
    return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
    assert(edata_slab_get(edata));
    return &edata->e_slab_data;
}

static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
    return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
        ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
    return &edata->e_prof_info.e_prof_alloc_time;
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
    return (prof_recent_t *)atomic_load_p(
        &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}

static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
        ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
    /* The assertion assumes szind is set already. */
    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
        ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
    edata->e_addr = addr;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
    assert((size & ~EDATA_SIZE_MASK) == 0);
    edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
    edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) |
        (esn & EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
    edata->e_bsize = bsize;
}

static inline void
edata_age_set(edata_t *edata, uint64_t age) {
    edata->age = age;
}

static inline void
edata_ps_set(edata_t *edata, edata_t *ps) {
    assert(edata_pai_get(edata) == EXTENT_PAI_HPA || ps == NULL);
    edata->ps = ps;
}

static inline void
edata_longest_free_range_set(edata_t *edata, uint32_t longest_free_range) {
    assert(edata_pai_get(edata) == EXTENT_PAI_HPA ||
        longest_free_range == 0);
    edata->longest_free_range = longest_free_range;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
    assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
        ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
    assert(edata_slab_get(edata));
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
        ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
    /* The assertion assumes szind is set already. */
    assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
    edata->e_bits = (edata->e_bits &
        (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
        ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
        ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
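
/*
 * Note (added for clarity, not in the original): the three helpers below
 * adjust nfree by adding to or subtracting from e_bits directly, e.g.
 *
 *   edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
 *
 * This cannot carry into the adjacent bin_shard field as long as nfree stays
 * within its EDATA_BITS_NFREE_WIDTH-bit range, which callers are assumed to
 * guarantee via the slab's region count.
 */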

static inline void
edata_nfree_inc(edata_t *edata) {
    assert(edata_slab_get(edata));
    edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
    assert(edata_slab_get(edata));
    edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
    assert(edata_slab_get(edata));
    edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_sn_set(edata_t *edata, size_t sn) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SN_MASK) |
        ((uint64_t)sn << EDATA_BITS_SN_SHIFT);
}

static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
        ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
        ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
        ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
        ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
        ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
    atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
    nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
    atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
        ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
    return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
        EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
    edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
        ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

/*
 * Because this function is implemented as a sequence of bitfield
 * modifications, even though each individual bit is properly initialized, we
 * technically read uninitialized data within it.  This is mostly fine, since
 * most callers get their edatas from zeroing sources, but callers who make
 * stack edata_ts need to manually zero them.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
    assert(addr == PAGE_ADDR2BASE(addr) || !slab);

    edata_arena_ind_set(edata, arena_ind);
    edata_addr_set(edata, addr);
    edata_size_set(edata, size);
    edata_slab_set(edata, slab);
    edata_szind_set(edata, szind);
    edata_sn_set(edata, sn);
    edata_state_set(edata, state);
    edata_zeroed_set(edata, zeroed);
    edata_committed_set(edata, committed);
    edata_pai_set(edata, pai);
    edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
    if (config_prof) {
        edata_prof_tctx_set(edata, NULL);
    }
    edata_age_set(edata, 0);
    edata_ps_set(edata, NULL);
    edata_longest_free_range_set(edata, 0);
}
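
/*
 * Illustrative sketch (not part of the header), following the warning above:
 * a caller constructing a temporary edata_t on the stack would zero it before
 * calling edata_init, with arena_ind, addr, size, and sn being
 * caller-supplied values:
 *
 *   edata_t tmp;
 *   memset(&tmp, 0, sizeof(tmp));
 *   edata_init(&tmp, arena_ind, addr, size, false, SC_NSIZES, sn,
 *       extent_state_active, false, true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
 *
 * Most callers get their edata_t from zeroing sources and don't need the
 * memset.
 */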

static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
    edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
    edata_addr_set(edata, addr);
    edata_bsize_set(edata, bsize);
    edata_slab_set(edata, false);
    edata_szind_set(edata, SC_NSIZES);
    edata_sn_set(edata, sn);
    edata_state_set(edata, extent_state_active);
    edata_zeroed_set(edata, true);
    edata_committed_set(edata, true);
    edata_age_set(edata, 0);
    /*
     * This isn't strictly true, but base-allocated extents never get
     * deallocated and can't be looked up in the emap, so there's no sense
     * in wasting a state bit to encode this fact.
     */
    edata_pai_set(edata, EXTENT_PAI_PAC);
    edata_ps_set(edata, NULL);
    edata_longest_free_range_set(edata, 0);
}

static inline int
edata_sn_comp(const edata_t *a, const edata_t *b) {
    size_t a_sn = edata_sn_get(a);
    size_t b_sn = edata_sn_get(b);

    return (a_sn > b_sn) - (a_sn < b_sn);
}

static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
    size_t a_esn = edata_esn_get(a);
    size_t b_esn = edata_esn_get(b);

    return (a_esn > b_esn) - (a_esn < b_esn);
}

static inline int
edata_ad_comp(const edata_t *a, const edata_t *b) {
    uintptr_t a_addr = (uintptr_t)edata_addr_get(a);
    uintptr_t b_addr = (uintptr_t)edata_addr_get(b);

    return (a_addr > b_addr) - (a_addr < b_addr);
}

static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
    uintptr_t a_eaddr = (uintptr_t)a;
    uintptr_t b_eaddr = (uintptr_t)b;

    return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
    int ret;

    ret = edata_sn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = edata_ad_comp(a, b);
    return ret;
}

static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
    int ret;

    ret = edata_esn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = edata_ead_comp(a, b);
    return ret;
}

static inline int
edata_age_comp(const edata_t *a, const edata_t *b) {
    uint64_t a_age = edata_age_get(a);
    uint64_t b_age = edata_age_get(b);

    /*
     * Equal ages are possible in certain race conditions, like two distinct
     * threads simultaneously allocating a fresh slab without holding a bin
     * lock.
     */
    int ret = (a_age > b_age) - (a_age < b_age);
    if (ret != 0) {
        return ret;
    }

    return edata_snad_comp(a, b);
}

ph_proto(, edata_avail_, edata_avail_t, edata_t)
ph_proto(, edata_heap_, edata_heap_t, edata_t)
ph_proto(, edata_age_heap_, edata_age_heap_t, edata_t)

#endif /* JEMALLOC_INTERNAL_EDATA_H */