Rename extent_t to edata_t.

This frees us up from the unfortunate extent/extent2 naming collision.
David Goldblatt 2019-12-09 14:36:45 -08:00 committed by David Goldblatt
parent 865debda22
commit a7862df616
32 changed files with 1200 additions and 1208 deletions
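
For orientation, a minimal before/after sketch of what the rename means at a typical call site (names taken from the hunks below; the mapping is mechanical, not exhaustive):

/* Before this commit: */
extent_t *extent = iealloc(tsdn, ptr);
szind_t szind = extent_szind_get(extent);
bool slab = extent_slab_get(extent);

/* After this commit: same structure, same semantics, new prefix. */
edata_t *edata = iealloc(tsdn, ptr);
szind_t szind = edata_szind_get(edata);
bool slab = edata_slab_get(edata);

The struct, its list/tree/heap typedefs, the EXTENT_BITS_* field macros, and every accessor move from the extent_ prefix to edata_; the extent-subsystem entry points (extents_alloc(), extent_alloc_wrapper(), and friends) keep their extent names, since they operate on extents rather than on the metadata struct.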

View File

@ -28,18 +28,18 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
ehooks_t *ehooks, extent_t *extent);
ehooks_t *ehooks, edata_t *edata);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
edata_t *edata, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
@ -64,7 +64,7 @@ void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr);
szind_t binind, edata_t *edata, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);

View File

@ -9,8 +9,8 @@
#include "jemalloc/internal/ticker.h"
static inline arena_t *
arena_get_from_extent(extent_t *extent) {
return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(extent)],
arena_get_from_edata(edata_t *edata) {
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
ATOMIC_RELAXED);
}
@ -42,20 +42,20 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx,
assert(ptr != NULL);
assert(prof_info != NULL);
const extent_t *extent;
const edata_t *edata;
bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
extent = iealloc(tsd_tsdn(tsd), ptr);
is_slab = extent_slab_get(extent);
edata = iealloc(tsd_tsdn(tsd), ptr);
is_slab = edata_slab_get(edata);
} else if (!unlikely(is_slab = alloc_ctx->slab)) {
extent = iealloc(tsd_tsdn(tsd), ptr);
edata = iealloc(tsd_tsdn(tsd), ptr);
}
if (unlikely(!is_slab)) {
/* extent must have been initialized at this point. */
large_prof_info_get(extent, prof_info);
/* edata must have been initialized at this point. */
large_prof_info_get(edata, prof_info);
} else {
memset(prof_info, 0, sizeof(prof_info_t));
prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
@ -69,9 +69,9 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
/* Static check. */
if (alloc_ctx == NULL) {
extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
if (unlikely(!extent_slab_get(extent))) {
large_prof_tctx_reset(extent);
edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
if (unlikely(!edata_slab_get(edata))) {
large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
@ -85,10 +85,10 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
assert(!extent_slab_get(extent));
edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
assert(!edata_slab_get(edata));
large_prof_tctx_reset(extent);
large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
@ -96,9 +96,9 @@ arena_prof_info_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
assert(!extent_slab_get(extent));
large_prof_info_set(extent, tctx);
edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
assert(!edata_slab_get(edata));
large_prof_info_set(edata, tctx);
}
JEMALLOC_ALWAYS_INLINE void
@ -130,9 +130,9 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent) {
size_t extent_size = extent_size_get(extent);
extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
edata_t *edata) {
size_t extent_size = edata_size_get(edata);
extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
if (config_stats) {
/* Update stats accordingly. */
arena_stats_lock(tsdn, &arena->stats);
@ -169,7 +169,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(
iealloc(tsdn, ptr))], ATOMIC_RELAXED);
}
@ -201,19 +201,19 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
edata_t *edata;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &edata, &szind)) {
return 0;
}
if (extent == NULL) {
if (edata == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
assert(edata_state_get(edata) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));
assert(szind != SC_NSIZES);
@ -225,8 +225,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
edata_t *edata = iealloc(tsdn, ptr);
large_dalloc(tsdn, edata);
}
}
@ -243,11 +243,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
true, &szind, &slab);
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {
@ -269,8 +269,8 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
edata_t *edata = iealloc(tsdn, ptr);
large_dalloc(tsdn, edata);
}
}
@ -300,11 +300,11 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {
@ -344,10 +344,10 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
assert(szind == edata_szind_get(edata));
assert(slab == edata_slab_get(edata));
}
}
@ -401,10 +401,10 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
extent_t *extent = rtree_extent_read(tsdn,
edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
assert(szind == edata_szind_get(edata));
assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {

View File

@ -94,8 +94,8 @@ struct arena_stats_s {
*/
atomic_zu_t retained; /* Derived. */
/* Number of extent_t structs allocated by base, but not being used. */
atomic_zu_t extent_avail;
/* Number of edata_t structs allocated by base, but not being used. */
atomic_zu_t edata_avail;
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;

View File

@ -144,7 +144,7 @@ struct arena_s {
*
* Synchronization: large_mtx.
*/
extent_list_t large;
edata_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
@ -185,14 +185,14 @@ struct arena_s {
malloc_mutex_t extent_grow_mtx;
/*
* Available extent structures that were allocated via
* base_alloc_extent().
* Available edata structures that were allocated via
* base_alloc_edata().
*
* Synchronization: extent_avail_mtx.
* Synchronization: edata_avail_mtx.
*/
extent_tree_t extent_avail;
atomic_zu_t extent_avail_cnt;
malloc_mutex_t extent_avail_mtx;
edata_tree_t edata_avail;
atomic_zu_t edata_avail_cnt;
malloc_mutex_t edata_avail_mtx;
/*
* bins is used to store heaps of free regions.

View File

@ -11,7 +11,7 @@ ehooks_t *base_ehooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);

View File

@ -16,7 +16,7 @@ struct base_block_s {
base_block_t *next;
/* Tracks unused trailing space. */
extent_t extent;
edata_t edata;
};
struct base_s {
@ -47,7 +47,7 @@ struct base_s {
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[SC_NSIZES];
edata_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;

View File

@ -22,17 +22,17 @@ struct bin_s {
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
extent_t *slabcur;
edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
extent_heap_t slabs_nonfull;
edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
extent_list_t slabs_full;
edata_list_t slabs_full;
/* Bin statistics. */
bin_stats_t stats;
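
The slabcur/slabs_nonfull/slabs_full fields above encode the bin's slab-selection policy. A hedged pseudocode sketch (not the actual allocation path; edata_heap_remove_first() is the ph-generated heap op assumed from edata.h):

/* Prefer the current slab; otherwise take the oldest/lowest non-full slab. */
edata_t *slab = bin->slabcur;
if (slab == NULL || edata_nfree_get(slab) == 0) {
        slab = edata_heap_remove_first(&bin->slabs_nonfull);
        bin->slabcur = slab;
}
/* Slabs that fill up completely move to bin->slabs_full. */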

View File

@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */

View File

@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_H
#define JEMALLOC_INTERNAL_EXTENT_H
#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
@ -26,11 +26,11 @@ enum extent_head_state_e {
typedef enum extent_head_state_e extent_head_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct extent_s extent_t;
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
struct extent_s {
typedef struct edata_s edata_t;
typedef ql_head(edata_t) edata_list_t;
typedef ph(edata_t) edata_tree_t;
typedef ph(edata_t) edata_heap_t;
struct edata_s {
/*
* Bitfield containing several fields:
*
@ -105,48 +105,48 @@ struct extent_s {
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_WIDTH 1
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_WIDTH 1
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_WIDTH 1
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
#define EDATA_BITS_DUMPABLE_WIDTH 1
#define EDATA_BITS_DUMPABLE_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_DUMPABLE_MASK MASK(EDATA_BITS_DUMPABLE_WIDTH, EDATA_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_WIDTH 1
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_DUMPABLE_WIDTH + EDATA_BITS_DUMPABLE_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_WIDTH 2
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
#define EDATA_BITS_STATE_WIDTH 2
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_WIDTH 6
#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_WIDTH 1
#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
#define EDATA_BITS_SN_SHIFT (EDATA_BITS_IS_HEAD_WIDTH + EDATA_BITS_IS_HEAD_SHIFT)
#define EDATA_BITS_SN_MASK (UINT64_MAX << EDATA_BITS_SN_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
@ -160,8 +160,8 @@ struct extent_s {
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
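
Two packing schemes meet in this struct. As a worked sketch (field widths from the defines above; EDATA_BITS_ARENA_WIDTH is MALLOCX_ARENA_BITS, assumed to be 12 here, so exact positions are platform-dependent):

/*
 * e_bits, low bits to high (assuming a 12-bit arena field):
 *   bits  0..11  arena index
 *   bit  12      slab
 *   bit  13      committed
 *   bit  14      dumpable
 *   bit  15      zeroed
 *   bits 16..17  state
 *   then szind, nfree, binshard, is_head, and the serial number (sn)
 *   occupying the remaining high bits.
 */
extent_state_t state = (extent_state_t)((edata->e_bits &
    EDATA_BITS_STATE_MASK) >> EDATA_BITS_STATE_SHIFT);

/*
 * e_size_esn overlays a page-aligned size with a sub-page serial
 * number, so the two masks partition the word exactly:
 */
size_t size = edata->e_size_esn & EDATA_SIZE_MASK; /* multiple of PAGE */
size_t esn = edata->e_size_esn & EDATA_ESN_MASK;   /* within [0, PAGE) */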
@ -173,13 +173,13 @@ struct extent_s {
* - stashed dirty extents
* - arena's large allocations
*/
ql_elm(extent_t) ql_link;
ql_elm(edata_t) ql_link;
/*
* Linkage for per size class sn/address-ordered heaps, and
* for extent_avail
*/
phn(extent_t) ph_link;
phn(edata_t) ph_link;
union {
/* Small region slab metadata. */
@ -196,398 +196,397 @@ struct extent_s {
};
static inline unsigned
extent_arena_ind_get(const extent_t *extent) {
unsigned arena_ind = (unsigned)((extent->e_bits &
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
edata_arena_ind_get(const edata_t *edata) {
unsigned arena_ind = (unsigned)((edata->e_bits &
EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
}
static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
EXTENT_BITS_SZIND_SHIFT);
edata_szind_get_maybe_invalid(const edata_t *edata) {
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
EDATA_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
edata_szind_get(const edata_t *edata) {
szind_t szind = edata_szind_get_maybe_invalid(edata);
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
extent_usize_get(const extent_t *extent) {
return sz_index2size(extent_szind_get(extent));
edata_usize_get(const edata_t *edata) {
return sz_index2size(edata_szind_get(edata));
}
static inline unsigned
extent_binshard_get(const extent_t *extent) {
unsigned binshard = (unsigned)((extent->e_bits &
EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
edata_binshard_get(const edata_t *edata) {
unsigned binshard = (unsigned)((edata->e_bits &
EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
return binshard;
}
static inline size_t
extent_sn_get(const extent_t *extent) {
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
EXTENT_BITS_SN_SHIFT);
edata_sn_get(const edata_t *edata) {
return (size_t)((edata->e_bits & EDATA_BITS_SN_MASK) >>
EDATA_BITS_SN_SHIFT);
}
static inline extent_state_t
extent_state_get(const extent_t *extent) {
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
EXTENT_BITS_STATE_SHIFT);
edata_state_get(const edata_t *edata) {
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
EDATA_BITS_STATE_SHIFT);
}
static inline bool
extent_zeroed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
EXTENT_BITS_ZEROED_SHIFT);
edata_zeroed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
EDATA_BITS_ZEROED_SHIFT);
}
static inline bool
extent_committed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
EXTENT_BITS_COMMITTED_SHIFT);
edata_committed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
EDATA_BITS_COMMITTED_SHIFT);
}
static inline bool
extent_dumpable_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
EXTENT_BITS_DUMPABLE_SHIFT);
edata_dumpable_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_DUMPABLE_MASK) >>
EDATA_BITS_DUMPABLE_SHIFT);
}
static inline bool
extent_slab_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
EXTENT_BITS_SLAB_SHIFT);
edata_slab_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
EDATA_BITS_SLAB_SHIFT);
}
static inline unsigned
extent_nfree_get(const extent_t *extent) {
assert(extent_slab_get(extent));
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
EXTENT_BITS_NFREE_SHIFT);
edata_nfree_get(const edata_t *edata) {
assert(edata_slab_get(edata));
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
EDATA_BITS_NFREE_SHIFT);
}
static inline void *
extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return PAGE_ADDR2BASE(extent->e_addr);
edata_base_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return PAGE_ADDR2BASE(edata->e_addr);
}
static inline void *
extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return extent->e_addr;
edata_addr_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return edata->e_addr;
}
static inline size_t
extent_size_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_SIZE_MASK);
edata_size_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_SIZE_MASK);
}
static inline size_t
extent_esn_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_ESN_MASK);
edata_esn_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_ESN_MASK);
}
static inline size_t
extent_bsize_get(const extent_t *extent) {
return extent->e_bsize;
edata_bsize_get(const edata_t *edata) {
return edata->e_bsize;
}
static inline void *
extent_before_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
edata_before_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}
static inline void *
extent_last_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE);
edata_last_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata) - PAGE);
}
static inline void *
extent_past_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent));
edata_past_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata));
}
static inline slab_data_t *
extent_slab_data_get(extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
edata_slab_data_get(edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline const slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
edata_slab_data_get_const(const edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline void
extent_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
edata_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
assert(prof_info != NULL);
prof_info->alloc_tctx = (prof_tctx_t *)atomic_load_p(
&extent->e_prof_tctx, ATOMIC_ACQUIRE);
prof_info->alloc_time = extent->e_alloc_time;
&edata->e_prof_tctx, ATOMIC_ACQUIRE);
prof_info->alloc_time = edata->e_alloc_time;
}
static inline void
extent_arena_ind_set(extent_t *extent, unsigned arena_ind) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}
static inline void
extent_binshard_set(extent_t *extent, unsigned binshard) {
edata_binshard_set(edata_t *edata, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}
static inline void
extent_addr_set(extent_t *extent, void *addr) {
extent->e_addr = addr;
edata_addr_set(edata_t *edata, void *addr) {
edata->e_addr = addr;
}
static inline void
extent_size_set(extent_t *extent, size_t size) {
assert((size & ~EXTENT_SIZE_MASK) == 0);
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
edata_size_set(edata_t *edata, size_t size) {
assert((size & ~EDATA_SIZE_MASK) == 0);
edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}
static inline void
extent_esn_set(extent_t *extent, size_t esn) {
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
EXTENT_ESN_MASK);
edata_esn_set(edata_t *edata, size_t esn) {
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
EDATA_ESN_MASK);
}
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
extent->e_bsize = bsize;
edata_bsize_set(edata_t *edata, size_t bsize) {
edata->e_bsize = bsize;
}
static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}
static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
assert(extent_slab_get(extent));
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
edata_nfree_set(edata_t *edata, unsigned nfree) {
assert(edata_slab_get(edata));
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
extent->e_bits = (extent->e_bits &
(~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits &
(~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_inc(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
edata_nfree_inc(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_dec(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
edata_nfree_dec(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_sub(extent_t *extent, uint64_t n) {
assert(extent_slab_get(extent));
extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
edata_nfree_sub(edata_t *edata, uint64_t n) {
assert(edata_slab_get(edata));
edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}
static inline void
extent_sn_set(extent_t *extent, size_t sn) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
edata_sn_set(edata_t *edata, size_t sn) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SN_MASK) |
((uint64_t)sn << EDATA_BITS_SN_SHIFT);
}
static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
edata_state_set(edata_t *edata, extent_state_t state) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}
static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
edata_zeroed_set(edata_t *edata, bool zeroed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}
static inline void
extent_committed_set(extent_t *extent, bool committed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
edata_committed_set(edata_t *edata, bool committed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}
static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
edata_dumpable_set(edata_t *edata, bool dumpable) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_DUMPABLE_MASK) |
((uint64_t)dumpable << EDATA_BITS_DUMPABLE_SHIFT);
}
static inline void
extent_slab_set(extent_t *extent, bool slab) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
edata_slab_set(edata_t *edata, bool slab) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
atomic_store_p(&edata->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
extent_prof_alloc_time_set(extent_t *extent, nstime_t *t) {
nstime_copy(&extent->e_alloc_time, t);
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
nstime_copy(&edata->e_alloc_time, t);
}
static inline bool
extent_is_head_get(extent_t *extent) {
edata_is_head_get(edata_t *edata) {
if (maps_coalesce) {
not_reached();
}
return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
EXTENT_BITS_IS_HEAD_SHIFT);
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
extent_is_head_set(extent_t *extent, bool is_head) {
edata_is_head_set(edata_t *edata, bool is_head) {
if (maps_coalesce) {
not_reached();
}
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
extent_init(extent_t *extent, unsigned arena_ind, void *addr, size_t size,
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool dumpable, extent_head_state_t is_head) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_ind_set(extent, arena_ind);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_slab_set(extent, slab);
extent_szind_set(extent, szind);
extent_sn_set(extent, sn);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_dumpable_set(extent, dumpable);
ql_elm_new(extent, ql_link);
edata_arena_ind_set(edata, arena_ind);
edata_addr_set(edata, addr);
edata_size_set(edata, size);
edata_slab_set(edata, slab);
edata_szind_set(edata, szind);
edata_sn_set(edata, sn);
edata_state_set(edata, state);
edata_zeroed_set(edata, zeroed);
edata_committed_set(edata, committed);
edata_dumpable_set(edata, dumpable);
ql_elm_new(edata, ql_link);
if (!maps_coalesce) {
extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
false);
edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
}
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
edata_prof_tctx_set(edata, NULL);
}
}
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_arena_ind_set(extent, (1U << MALLOCX_ARENA_BITS) - 1);
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, SC_NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
extent_dumpable_set(extent, true);
edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
edata_addr_set(edata, addr);
edata_bsize_set(edata, bsize);
edata_slab_set(edata, false);
edata_szind_set(edata, SC_NSIZES);
edata_sn_set(edata, sn);
edata_state_set(edata, extent_state_active);
edata_zeroed_set(edata, true);
edata_committed_set(edata, true);
edata_dumpable_set(edata, true);
}
static inline void
extent_list_init(extent_list_t *list) {
edata_list_init(edata_list_t *list) {
ql_new(list);
}
static inline extent_t *
extent_list_first(const extent_list_t *list) {
static inline edata_t *
edata_list_first(const edata_list_t *list) {
return ql_first(list);
}
static inline extent_t *
extent_list_last(const extent_list_t *list) {
static inline edata_t *
edata_list_last(const edata_list_t *list) {
return ql_last(list, ql_link);
}
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
ql_tail_insert(list, extent, ql_link);
edata_list_append(edata_list_t *list, edata_t *edata) {
ql_tail_insert(list, edata, ql_link);
}
static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
ql_head_insert(list, extent, ql_link);
edata_list_prepend(edata_list_t *list, edata_t *edata) {
ql_head_insert(list, edata, ql_link);
}
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
extent_t *to_insert) {
edata_list_replace(edata_list_t *list, edata_t *to_remove,
edata_t *to_insert) {
ql_after_insert(to_remove, to_insert, ql_link);
ql_remove(list, to_remove, ql_link);
}
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
ql_remove(list, extent, ql_link);
edata_list_remove(edata_list_t *list, edata_t *edata) {
ql_remove(list, edata, ql_link);
}
static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
edata_sn_comp(const edata_t *a, const edata_t *b) {
size_t a_sn = edata_sn_get(a);
size_t b_sn = edata_sn_get(b);
return (a_sn > b_sn) - (a_sn < b_sn);
}
static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
size_t a_esn = extent_esn_get(a);
size_t b_esn = extent_esn_get(b);
edata_esn_comp(const edata_t *a, const edata_t *b) {
size_t a_esn = edata_esn_get(a);
size_t b_esn = edata_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
edata_ad_comp(const edata_t *a, const edata_t *b) {
uintptr_t a_addr = (uintptr_t)edata_addr_get(a);
uintptr_t b_addr = (uintptr_t)edata_addr_get(b);
return (a_addr > b_addr) - (a_addr < b_addr);
}
static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
edata_ead_comp(const edata_t *a, const edata_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
@ -595,32 +594,32 @@ extent_ead_comp(const extent_t *a, const extent_t *b) {
}
static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
edata_snad_comp(const edata_t *a, const edata_t *b) {
int ret;
ret = extent_sn_comp(a, b);
ret = edata_sn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ad_comp(a, b);
ret = edata_ad_comp(a, b);
return ret;
}
static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
edata_esnead_comp(const edata_t *a, const edata_t *b) {
int ret;
ret = extent_esn_comp(a, b);
ret = edata_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ead_comp(a, b);
ret = edata_ead_comp(a, b);
return ret;
}
ph_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)
ph_proto(, edata_avail_, edata_tree_t, edata_t)
ph_proto(, edata_heap_, edata_heap_t, edata_t)
#endif /* JEMALLOC_INTERNAL_EXTENT_H */
#endif /* JEMALLOC_INTERNAL_EDATA_H */
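
As a usage sketch of the renamed container API (edata_list_* from this header; ql_foreach comes from jemalloc's ql.h, not this diff; edata_a and edata_b stand for two already-initialized edata_t pointers):

edata_list_t list;
edata_list_init(&list);
edata_list_append(&list, edata_a);
edata_list_append(&list, edata_b);
assert(edata_list_first(&list) == edata_a);
assert(edata_list_last(&list) == edata_b);

edata_t *e;
ql_foreach(e, &list, ql_link) {
        /* Visits edata_a, then edata_b, in append order. */
}

The heap and tree orderings compose the comparators above: edata_snad_comp() sorts by serial number first and address second, so ties between equally old extents break toward the lower address.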

View File

@ -19,7 +19,7 @@ struct eset_s {
*
* Synchronization: mtx.
*/
extent_heap_t heaps[SC_NPSIZES + 1];
edata_heap_t heaps[SC_NPSIZES + 1];
atomic_zu_t nextents[SC_NPSIZES + 1];
atomic_zu_t nbytes[SC_NPSIZES + 1];
@ -35,7 +35,7 @@ struct eset_s {
*
* Synchronization: mtx.
*/
extent_list_t lru;
edata_list_t lru;
/*
* Page sum for all extents in heaps.
@ -67,13 +67,13 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
/*
* Select an extent from this eset of the given size and alignment. Returns
* null if no such item could be found.
*/
extent_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
edata_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
size_t alignment);
void eset_prefork(tsdn_t *tsdn, eset_t *eset);
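
A minimal caller-side sketch of the fit API declared above (the _locked suffix means the eset mutex is assumed held; whether the fit call or its caller removes the chosen extent is not visible in this header, so the explicit remove below is illustrative):

edata_t *edata = eset_fit_locked(tsdn, eset, esize, alignment);
if (edata != NULL) {
        eset_remove_locked(tsdn, eset, edata); /* claim it for reuse */
} else {
        /* Nothing cached fits; fall back to mapping fresh pages. */
}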

View File

@ -26,38 +26,38 @@ extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
edata_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, void *new_addr, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, edata_t *edata);
edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, size_t npages_min);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent);
edata_t *edata);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent);
edata_t *edata);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent, size_t offset, size_t length);
edata_t *edata, size_t offset, size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent, size_t offset, size_t length);
edata_t *edata, size_t offset, size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent, size_t offset, size_t length);
edata_t *edata, size_t offset, size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent, size_t offset, size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a,
edata_t *edata, size_t offset, size_t length);
edata_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *a, extent_t *b);
bool extent_head_no_merge(extent_t *a, extent_t *b);
edata_t *a, edata_t *b);
bool extent_head_no_merge(edata_t *a, edata_t *b);
bool extent_boot(void);

View File

@ -76,12 +76,12 @@ arena_is_auto(arena_t *arena) {
return (arena_ind_get(arena) < manual_arena_base);
}
JEMALLOC_ALWAYS_INLINE extent_t *
JEMALLOC_ALWAYS_INLINE edata_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
return rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
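
iealloc() is the pointer-to-metadata lookup behind the deallocation paths above: it resolves any allocated address through the global extents_rtree. Combined with the edata.h accessors, a sketch of a typical probe:

edata_t *edata = iealloc(tsdn, ptr);
szind_t szind = edata_szind_get(edata);
size_t usize = sz_index2size(szind); /* same as edata_usize_get(edata) */
bool slab = edata_slab_get(edata);   /* small (slab) vs. large */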

View File

@ -6,7 +6,7 @@
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
@ -18,12 +18,12 @@ extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
void large_prof_info_get(const extent_t *extent, prof_info_t *prof_info);
void large_prof_tctx_reset(extent_t *extent);
void large_prof_info_set(extent_t *extent, prof_tctx_t *tctx);
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
void large_prof_info_get(const edata_t *edata, prof_info_t *prof_info);
void large_prof_tctx_reset(edata_t *edata);
void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx);
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */

View File

@ -48,18 +48,18 @@ struct rtree_leaf_elm_s {
/*
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
* memory address bits, the index, extent, and slab fields are packed as
* memory address bits, the index, edata, and slab fields are packed as
* such:
*
* x: index
* e: extent
* e: edata
* b: slab
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
*/
atomic_p_t le_bits;
#else
atomic_p_t le_extent; /* (extent_t *) */
atomic_p_t le_edata; /* (edata_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
#endif
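
A decoding sketch of the compact layout diagrammed above (LG_VADDR == 48 assumed, matching the 48-bit example; szind, edata, and slab stand for the values being stored; the packing mirrors rtree_leaf_elm_write() below):

/* Pack: szind above the 48 address bits, the edata pointer in the
 * middle, and the slab flag in bit 0 (the pointer's bit 0 is free
 * because edata_t is at least 2-byte aligned). */
uintptr_t bits = ((uintptr_t)szind << LG_VADDR)
    | ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1))
    | (uintptr_t)slab;

/* Unpack the index directly; recovering the pointer must also restore
 * the sign-extended high bits, as rtree_leaf_elm_bits_edata_get() does. */
szind_t index = (szind_t)(bits >> LG_VADDR);
bool is_slab = (bool)(bits & (uintptr_t)0x1);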
@ -176,8 +176,8 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
JEMALLOC_ALWAYS_INLINE edata_t *
rtree_leaf_elm_bits_edata_get(uintptr_t bits) {
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
@ -187,10 +187,10 @@ rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
/* Mask off the slab bit. */
uintptr_t low_bit_mask = ~(uintptr_t)1;
uintptr_t mask = high_bit_mask & low_bit_mask;
return (extent_t *)(bits & mask);
return (edata_t *)(bits & mask);
# else
/* Restore sign-extended high bits, mask slab bit. */
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
return (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
RTREE_NHIB) & ~((uintptr_t)0x1));
# endif
}
@ -207,16 +207,16 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
# endif
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
JEMALLOC_ALWAYS_INLINE edata_t *
rtree_leaf_elm_edata_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_extent_get(bits);
return rtree_leaf_elm_bits_edata_get(bits);
#else
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
edata_t *edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return extent;
return edata;
#endif
}
@ -245,16 +245,16 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
}
static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, extent_t *extent) {
rtree_leaf_elm_edata_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, edata_t *edata) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
LG_VADDR) | ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1))
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
atomic_store_p(&elm->le_edata, edata, ATOMIC_RELEASE);
#endif
}
@ -267,7 +267,7 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
@ -283,7 +283,7 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
@ -293,20 +293,20 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
rtree_leaf_elm_t *elm, edata_t *edata, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
/*
* Write extent last, since the element is atomically considered valid
* as soon as the extent field is non-NULL.
* Write edata last, since the element is atomically considered valid
* as soon as the edata field is non-NULL.
*/
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
rtree_leaf_elm_edata_write(tsdn, rtree, elm, edata);
#endif
}
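
The ordering comment above pairs with an acquire load on the read side: once a reader observes a non-NULL edata, the earlier slab and szind stores are visible too. Reader-side sketch (release/acquire pairing on le_edata, per the atomics used above):

edata_t *edata = (edata_t *)atomic_load_p(&elm->le_edata, ATOMIC_ACQUIRE);
if (edata != NULL) {
        /* The element is valid: the szind and slab fields written
         * before the release store of edata are guaranteed visible. */
}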
@ -317,7 +317,7 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
/*
* The caller implicitly assures that it is the only writer to the szind
* and slab fields, and that the extent field cannot currently change.
* and slab fields, and that the edata field cannot currently change.
*/
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
@ -384,9 +384,9 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
extent_t *extent, szind_t szind, bool slab) {
/* Use rtree_clear() to set the extent to NULL. */
assert(extent != NULL);
edata_t *edata, szind_t szind, bool slab) {
/* Use rtree_clear() to set the edata to NULL. */
assert(edata != NULL);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
@ -394,8 +394,8 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
return true;
}
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) == NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, edata, szind, slab);
return false;
}
@ -412,15 +412,15 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
return elm;
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
JEMALLOC_ALWAYS_INLINE edata_t *
rtree_edata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
@ -440,14 +440,14 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
rtree_edata_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, edata_t **r_edata, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
*r_edata = rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
return false;
}
@ -520,7 +520,7 @@ static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
}
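
Taken together, the rtree side of an extent's lifecycle reads as follows (sketch; error handling elided; addr, edata, szind, slab, and rtree_ctx assumed in scope):

/* On allocation: publish addr -> (edata, szind, slab). */
rtree_write(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)addr,
    edata, szind, slab);

/* On lookup (e.g. from iealloc()): */
edata_t *found = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
    (uintptr_t)addr, true);

/* On deallocation: reset the element to (NULL, SC_NSIZES, false). */
rtree_clear(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)addr);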

View File

@ -43,7 +43,7 @@
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EXTENT_AVAIL 16U
#define WITNESS_RANK_EDATA_AVAIL 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_RTREE 18U

View File

@ -60,9 +60,9 @@ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
/******************************************************************************/
@ -102,8 +102,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_stats_accum_zu(&astats->retained,
eset_npages_get(&arena->eset_retained) << LG_PAGE);
atomic_store_zu(&astats->extent_avail,
atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
atomic_store_zu(&astats->edata_avail,
atomic_load_zu(&arena->edata_avail_cnt, ATOMIC_RELAXED),
ATOMIC_RELAXED);
arena_stats_accum_u64(&astats->decay_dirty.npurge,
@ -224,7 +224,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
READ_ARENA_MUTEX_PROF_DATA(edata_avail_mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(eset_dirty.mtx,
arena_prof_mutex_extents_dirty)
@ -254,11 +254,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent) {
edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, extent);
extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata);
if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true);
} else {
@ -267,34 +267,34 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
}
static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret;
slab_data_t *slab_data = extent_slab_data_get(slab);
slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind;
assert(extent_nfree_get(slab) > 0);
assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)extent_addr_get(slab) +
ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
extent_nfree_dec(slab);
edata_nfree_dec(slab);
return ret;
}
static void
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) {
slab_data_t *slab_data = extent_slab_data_get(slab);
slab_data_t *slab_data = edata_slab_data_get(slab);
assert(extent_nfree_get(slab) >= cnt);
assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info);
*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
}
#else
@ -315,7 +315,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* hot loop below.
*/
uintptr_t base = (uintptr_t)extent_addr_get(slab);
uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) {
size_t bit = cfs_lu(&g);
@ -327,24 +327,24 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g;
}
#endif
extent_nfree_sub(slab, cnt);
edata_nfree_sub(slab, cnt);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
/* Avoid doing division with a variable divisor. */
regind = div_compute(&arena_binind_div_info[binind], diff);
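
arena_slab_regind() recovers a region index without a hardware division. A worked example with hypothetical numbers (reg_size == 48, region 3 of the slab):

/* ptr == edata_addr_get(slab) + 3 * 48, so diff == 144. */
size_t diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
/* div_compute() divides by the precomputed bin_infos[binind].reg_size
 * via a multiplicative inverse: 144 / 48 == 3. */
size_t regind = div_compute(&arena_binind_div_info[binind], diff);
assert(regind == 3);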
@ -355,17 +355,17 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
}
static void
arena_slab_reg_dalloc(extent_t *slab, slab_data_t *slab_data, void *ptr) {
szind_t binind = extent_szind_get(slab);
arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
assert(extent_nfree_get(slab) < bin_info->nregs);
assert(edata_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
extent_nfree_inc(slab);
edata_nfree_inc(slab);
}
static void
@ -423,7 +423,7 @@ arena_may_have_muzzy(arena_t *arena) {
return arena_muzzy_decay_ms_get(arena) != 0;
}
extent_t *
edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero) {
ehooks_t *ehooks = arena_get_ehooks(arena);
@ -434,23 +434,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
szind_t szind = sz_size2index(usize);
size_t mapped_add;
bool commit = true;
extent_t *extent = extents_alloc(tsdn, arena, ehooks,
&arena->eset_dirty, NULL, usize, sz_large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL && arena_may_have_muzzy(arena)) {
extent = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
edata_t *edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit);
if (edata == NULL && arena_may_have_muzzy(arena)) {
edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
NULL, usize, sz_large_pad, alignment, false, szind, zero,
&commit);
}
size_t size = usize + sz_large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
if (edata == NULL) {
edata = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
sz_large_pad, alignment, false, szind, zero, &commit);
if (config_stats) {
/*
* extent may be NULL on OOM, but in that case
* mapped_add isn't used below, so there's no need to
* conditionally set it to 0 here.
* edata may be NULL on OOM, but in that case mapped_add
* isn't used below, so there's no need to conditionally
* set it to 0 here.
*/
mapped_add = size;
}
@ -458,7 +457,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
mapped_add = 0;
}
if (extent != NULL) {
if (edata != NULL) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_malloc_stats_update(tsdn, arena, usize);
@ -471,24 +470,24 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, size >> LG_PAGE);
}
return extent;
return edata;
}
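The allocation path above is a reuse cascade: dirty extents first, muzzy ones only if the muzzy decay setting allows any to exist, and a fresh mapping as the last resort. The same shape as a runnable toy, with single-slot pools standing in for esets and malloc standing in for extent_alloc_wrapper:

#include <stdbool.h>
#include <stdlib.h>

/* Toy "page pool": caches at most one block. */
typedef struct {
    void *cached;
    size_t cached_size;
} toy_pool_t;

static void *
toy_pool_take(toy_pool_t *pool, size_t size) {
    if (pool->cached != NULL && pool->cached_size >= size) {
        void *p = pool->cached;
        pool->cached = NULL;
        return p;
    }
    return NULL;
}

/* Reuse dirty first, then muzzy, then map fresh memory. */
static void *
cascade_alloc(toy_pool_t *dirty, toy_pool_t *muzzy, bool may_have_muzzy,
    size_t size) {
    void *p = toy_pool_take(dirty, size);
    if (p == NULL && may_have_muzzy) {
        p = toy_pool_take(muzzy, size);
    }
    if (p == NULL) {
        p = malloc(size); /* stands in for a new OS mapping */
    }
    return p;
}

int
main(void) {
    static char block[4096];
    toy_pool_t dirty = {block, sizeof(block)}, muzzy = {NULL, 0};
    void *p = cascade_alloc(&dirty, &muzzy, true, 4096);
    if (p != block) { /* should have come from the dirty pool */
        return 1;
    }
    void *q = cascade_alloc(&dirty, &muzzy, true, 4096); /* falls to malloc */
    free(q);
    return 0;
}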
void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_dalloc_stats_update(tsdn, arena,
extent_usize_get(extent));
edata_usize_get(edata));
arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
arena_nactive_sub(arena, edata_size_get(edata) >> LG_PAGE);
}
void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t usize = edata_usize_get(edata);
size_t udiff = oldusize - usize;
if (config_stats) {
@ -500,9 +499,9 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t usize = edata_usize_get(edata);
size_t udiff = usize - oldusize;
if (config_stats) {
@ -819,25 +818,25 @@ arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
ehooks_t *ehooks, eset_t *eset, size_t npages_limit,
size_t npages_decay_max, extent_list_t *decay_extents) {
size_t npages_decay_max, edata_list_t *decay_extents) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
extent_t *extent;
edata_t *edata;
while (nstashed < npages_decay_max &&
(extent = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
(edata = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
!= NULL) {
extent_list_append(decay_extents, extent);
nstashed += extent_size_get(extent) >> LG_PAGE;
edata_list_append(decay_extents, edata);
nstashed += edata_size_get(edata) >> LG_PAGE;
}
return nstashed;
}
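Decay is two-phase, and this function is phase one: evict extents from the eset until the cached page count drops to npages_limit, parking them on a private list so the slow purge pass can run without monopolizing the eset. A toy sketch with hypothetical types:

#include <assert.h>
#include <stddef.h>

typedef struct toy_extent_s toy_extent_t;
struct toy_extent_s {
    size_t npages;
    toy_extent_t *next;
};

/* Pop extents from lru until at most npages_limit pages remain cached;
 * collect them on a private decay list for the second, slower pass. */
static size_t
toy_stash_decayed(toy_extent_t **lru, size_t *ncached_pages,
    size_t npages_limit, toy_extent_t **decay_list) {
    size_t nstashed = 0;
    while (*ncached_pages > npages_limit && *lru != NULL) {
        toy_extent_t *e = *lru;
        *lru = e->next;
        *ncached_pages -= e->npages;
        e->next = *decay_list;
        *decay_list = e;
        nstashed += e->npages;
    }
    return nstashed;
}

int
main(void) {
    toy_extent_t a = {4, NULL}, b = {8, &a}, c = {16, &b};
    toy_extent_t *lru = &c, *decay = NULL;
    size_t cached = 28;
    size_t n = toy_stash_decayed(&lru, &cached, 8, &decay);
    assert(n == 24 && cached == 4 && decay != NULL);
    return 0;
}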
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
arena_decay_t *decay, eset_t *eset, bool all, extent_list_t *decay_extents,
arena_decay_t *decay, eset_t *eset, bool all, edata_list_t *decay_extents,
bool is_background_thread) {
size_t nmadvise, nunmapped;
size_t npurged;
@ -849,31 +848,30 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
npurged = 0;
ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
for (extent_t *extent = extent_list_first(decay_extents); extent !=
NULL; extent = extent_list_first(decay_extents)) {
for (edata_t *edata = edata_list_first(decay_extents); edata !=
NULL; edata = edata_list_first(decay_extents)) {
if (config_stats) {
nmadvise++;
}
size_t npages = extent_size_get(extent) >> LG_PAGE;
size_t npages = edata_size_get(edata) >> LG_PAGE;
npurged += npages;
extent_list_remove(decay_extents, extent);
edata_list_remove(decay_extents, edata);
switch (eset_state_get(eset)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (!all && muzzy_decay_ms != 0 &&
!extent_purge_lazy_wrapper(tsdn, arena,
ehooks, extent, 0,
extent_size_get(extent))) {
ehooks, edata, 0, edata_size_get(edata))) {
extents_dalloc(tsdn, arena, ehooks,
&arena->eset_muzzy, extent);
&arena->eset_muzzy, edata);
arena_background_thread_inactivity_check(tsdn,
arena, is_background_thread);
break;
}
JEMALLOC_FALLTHROUGH;
case extent_state_muzzy:
extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
if (config_stats) {
nunmapped += npages;
}
@ -923,8 +921,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
ehooks_t *ehooks = arena_get_ehooks(arena);
extent_list_t decay_extents;
extent_list_init(&decay_extents);
edata_list_t decay_extents;
edata_list_init(&decay_extents);
size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, eset,
npages_limit, npages_decay_max, &decay_extents);
@ -1000,33 +998,33 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
}
static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
ehooks_t *ehooks = arena_get_ehooks(arena);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab);
}
static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
assert(extent_nfree_get(slab) > 0);
extent_heap_insert(&bin->slabs_nonfull, slab);
arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
assert(edata_nfree_get(slab) > 0);
edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
extent_heap_remove(&bin->slabs_nonfull, slab);
arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
static extent_t *
static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return NULL;
}
@ -1038,30 +1036,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
assert(extent_nfree_get(slab) == 0);
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
assert(edata_nfree_get(slab) == 0);
/*
* Tracking extents is required by arena_reset, which is not allowed
* for auto arenas. Bypass this step to avoid touching the extent
* for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas.
*/
if (arena_is_auto(arena)) {
return;
}
extent_list_append(&bin->slabs_full, slab);
edata_list_append(&bin->slabs_full, slab);
}
static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
extent_list_remove(&bin->slabs_full, slab);
edata_list_remove(&bin->slabs_full, slab);
}
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
extent_t *slab;
edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) {
@ -1071,13 +1069,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
slab = extent_list_first(&bin->slabs_full)) {
for (slab = edata_list_first(&bin->slabs_full); slab != NULL;
slab = edata_list_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@ -1109,9 +1107,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
for (extent_t *extent = extent_list_first(&arena->large); extent !=
NULL; extent = extent_list_first(&arena->large)) {
void *ptr = extent_base_get(extent);
for (edata_t *edata = edata_list_first(&arena->large); edata !=
NULL; edata = edata_list_first(&arena->large)) {
void *ptr = edata_base_get(edata);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@ -1129,7 +1127,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx);
}
large_dalloc(tsd_tsdn(tsd), extent);
large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@ -1157,10 +1155,10 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
* dss-based extents for later reuse.
*/
ehooks_t *ehooks = arena_get_ehooks(arena);
extent_t *extent;
while ((extent = extents_evict(tsdn, arena, ehooks,
edata_t *edata;
while ((edata = extents_evict(tsdn, arena, ehooks,
&arena->eset_retained, 0)) != NULL) {
extent_destroy_wrapper(tsdn, arena, ehooks, extent);
extent_destroy_wrapper(tsdn, arena, ehooks, edata);
}
}
@ -1200,10 +1198,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
base_delete(tsd_tsdn(tsd), arena->base);
}
static extent_t *
static edata_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
const bin_info_t *bin_info, szind_t szind) {
extent_t *slab;
edata_t *slab;
bool zero, commit;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@ -1222,7 +1220,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
return slab;
}
static extent_t *
static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@ -1232,7 +1230,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
szind_t szind = sz_size2index(bin_info->reg_size);
bool zero = false;
bool commit = true;
extent_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit);
if (slab == NULL && arena_may_have_muzzy(arena)) {
slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
@ -1246,22 +1244,22 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
return NULL;
}
}
assert(extent_slab_get(slab));
assert(edata_slab_get(slab));
/* Initialize slab internals. */
slab_data_t *slab_data = extent_slab_data_get(slab);
extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
slab_data_t *slab_data = edata_slab_data_get(slab);
edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
arena_nactive_add(arena, edata_size_get(slab) >> LG_PAGE);
return slab;
}
static extent_t *
static edata_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, unsigned binshard) {
extent_t *slab;
edata_t *slab;
const bin_info_t *bin_info;
/* Look for a usable slab. */
@ -1307,14 +1305,14 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
if (bin->slabcur != NULL) {
/* Only attempted when current slab is full. */
assert(extent_nfree_get(bin->slabcur) == 0);
assert(edata_nfree_get(bin->slabcur) == 0);
}
const bin_info_t *bin_info = &bin_infos[binind];
extent_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
edata_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
binshard);
if (bin->slabcur != NULL) {
if (extent_nfree_get(bin->slabcur) > 0) {
if (edata_nfree_get(bin->slabcur) > 0) {
/*
* Another thread updated slabcur while this one ran
* without the bin lock in arena_bin_nonfull_slab_get().
@ -1331,7 +1329,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
if (extent_nfree_get(slab) == bin_info->nregs) {
if (edata_nfree_get(slab) == bin_info->nregs) {
arena_dalloc_bin_slab(tsdn, arena, slab,
bin);
} else {
@ -1350,7 +1348,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
return NULL;
}
bin->slabcur = slab;
assert(extent_nfree_get(bin->slabcur) > 0);
assert(edata_nfree_get(bin->slabcur) > 0);
return arena_slab_reg_alloc(slab, bin_info);
}
@ -1386,12 +1384,12 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
void **empty_position = cache_bin_empty_position_get(tbin, binind);
for (i = 0, nfill = (cache_bin_ncached_max_get(binind) >>
tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
extent_t *slab;
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
edata_t *slab;
if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) >
0) {
unsigned tofill = nfill - i;
cnt = tofill < extent_nfree_get(slab) ?
tofill : extent_nfree_get(slab);
cnt = tofill < edata_nfree_get(slab) ?
tofill : edata_nfree_get(slab);
arena_slab_reg_alloc_batch(
slab, &bin_infos[binind], cnt,
empty_position - nfill + i);
@ -1454,14 +1452,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
bin_t *bin;
size_t usize;
extent_t *slab;
edata_t *slab;
assert(binind < SC_NBINS);
usize = sz_index2size(binind);
unsigned binshard;
bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) > 0) {
ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
} else {
ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
@ -1554,11 +1552,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
szind, false);
@ -1568,11 +1566,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
}
static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
extent_szind_set(extent, SC_NBINS);
edata_szind_set(edata, SC_NBINS);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
@ -1589,9 +1587,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof);
assert(opt_prof);
extent_t *extent = iealloc(tsdn, ptr);
size_t usize = extent_usize_get(extent);
size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
edata_t *edata = iealloc(tsdn, ptr);
size_t usize = edata_usize_get(edata);
size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/*
* Currently, we only do redzoning for small sampled
@ -1604,17 +1602,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {
large_dalloc(tsdn, extent);
large_dalloc(tsdn, edata);
}
}
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
szind_t binind = extent_szind_get(slab);
szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
/*
@ -1631,7 +1629,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(slab != bin->slabcur);
@ -1646,9 +1644,9 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(extent_nfree_get(slab) > 0);
assert(edata_nfree_get(slab) > 0);
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
@ -1656,9 +1654,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
if (extent_nfree_get(bin->slabcur) > 0) {
if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@ -1674,8 +1672,8 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *slab, void *ptr, bool junked) {
slab_data_t *slab_data = extent_slab_data_get(slab);
szind_t binind, edata_t *slab, void *ptr, bool junked) {
slab_data_t *slab_data = edata_slab_data_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
@ -1683,7 +1681,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
}
arena_slab_reg_dalloc(slab, slab_data, ptr);
unsigned nfree = extent_nfree_get(slab);
unsigned nfree = edata_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dissociate_bin_slab(arena, slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
@ -1700,29 +1698,29 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr) {
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
szind_t binind, edata_t *edata, void *ptr) {
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
true);
}
static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
szind_t binind = extent_szind_get(extent);
unsigned binshard = extent_binshard_get(extent);
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
szind_t binind = edata_szind_get(edata);
unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[binind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
extent_t *extent = iealloc(tsdn, ptr);
arena_t *arena = arena_get_from_extent(extent);
edata_t *edata = iealloc(tsdn, ptr);
arena_t *arena = arena_get_from_edata(edata);
arena_dalloc_bin(tsdn, arena, extent, ptr);
arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena);
}
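Note that arena_dalloc_small starts from nothing but the raw pointer: iealloc recovers the owning edata from a global radix tree keyed by page address. A flat-table toy of that lookup contract (one slot per page of a tiny fake address space; jemalloc's rtree covers the whole address space in a couple of levels):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TOY_LG_PAGE 12
#define TOY_NPAGES  64

typedef struct { int id; } toy_edata_t;

/* Toy metadata map: one slot per page.  The contract matches iealloc:
 * any pointer into a mapped page resolves to the owning edata. */
static toy_edata_t *toy_map[TOY_NPAGES];

static void
toy_map_set(uintptr_t base, size_t npages, toy_edata_t *edata) {
    for (size_t i = 0; i < npages; i++) {
        toy_map[(base >> TOY_LG_PAGE) + i] = edata;
    }
}

static toy_edata_t *
toy_iealloc(uintptr_t ptr) {
    return toy_map[ptr >> TOY_LG_PAGE];
}

int
main(void) {
    toy_edata_t slab = {42};
    /* A 2-page slab mapped at fake page 3. */
    toy_map_set((uintptr_t)3 << TOY_LG_PAGE, 2, &slab);
    /* Any interior pointer resolves to the owning slab's metadata. */
    assert(toy_iealloc(((uintptr_t)4 << TOY_LG_PAGE) + 100)->id == 42);
    return 0;
}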
@ -1733,7 +1731,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
extent_t *extent = iealloc(tsdn, ptr);
edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true;
goto done;
@ -1756,19 +1754,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done;
}
arena_t *arena = arena_get_from_extent(extent);
arena_t *arena = arena_get_from_edata(edata);
arena_decay_tick(tsdn, arena);
ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) {
ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero);
} else {
ret = true;
}
done:
assert(extent == iealloc(tsdn, ptr));
*newsize = extent_usize_get(extent);
assert(edata == iealloc(tsdn, ptr));
*newsize = edata_usize_get(edata);
return ret;
}
@ -2006,7 +2004,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
extent_list_init(&arena->large);
edata_list_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
@ -2055,9 +2053,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
extent_avail_new(&arena->extent_avail);
if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
edata_avail_new(&arena->edata_avail);
if (malloc_mutex_init(&arena->edata_avail_mtx, "edata_avail",
WITNESS_RANK_EDATA_AVAIL, malloc_mutex_rank_exclusive)) {
goto label_error;
}
@ -2203,7 +2201,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
malloc_mutex_prefork(tsdn, &arena->edata_avail_mtx);
}
void
@ -2237,7 +2235,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->edata_avail_mtx);
eset_postfork_parent(tsdn, &arena->eset_dirty);
eset_postfork_parent(tsdn, &arena->eset_muzzy);
eset_postfork_parent(tsdn, &arena->eset_retained);
@ -2283,7 +2281,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
malloc_mutex_postfork_child(tsdn, &arena->edata_avail_mtx);
eset_postfork_child(tsdn, &arena->eset_dirty);
eset_postfork_child(tsdn, &arena->eset_muzzy);
eset_postfork_child(tsdn, &arena->eset_retained);

View File

@ -105,14 +105,14 @@ label_done:
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_binit(extent, addr, size, sn);
edata_binit(edata, addr, size, sn);
}
static size_t
@ -158,7 +158,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@ -166,34 +166,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
sz_size2index(edata_bsize_get(edata) + 1) - 1;
edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@ -218,13 +218,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
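The index_floor line above deserves a worked example: sz_size2index rounds a requested size up to a class, so querying bsize + 1 and stepping back one class gives the largest class that still fits inside a leftover block. A toy with power-of-two classes:

#include <assert.h>
#include <stddef.h>

/*
 * Toy size classes: index i holds class (8 << i): 8, 16, 32, 64, ...
 * toy_size2index rounds a request up to the smallest class that holds it;
 * filing a leftover block of capacity bsize needs the opposite rounding,
 * which is toy_size2index(bsize + 1) - 1.
 */
static size_t
toy_size2index(size_t size) { /* smallest class >= size */
    size_t i = 0;
    while (((size_t)8 << i) < size) {
        i++;
    }
    return i;
}

int
main(void) {
    /* Capacity 48: class 64 (index 3) is too big; class 32 (index 2) fits. */
    size_t index_floor = toy_size2index(48 + 1) - 1;
    assert(index_floor == 2);
    /* Exact capacity 32 files under class 32 itself. */
    assert(toy_size2index(32 + 1) - 1 == 2);
    return 0;
}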
@ -284,7 +284,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@ -293,7 +293,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static extent_t *
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
@ -327,7 +327,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
return &block->edata;
}
base_t *
@ -357,7 +357,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
base->ind = ind;
ehooks_init(&base->ehooks, extent_hooks);
@ -371,7 +371,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]);
edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@ -384,7 +384,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@ -422,28 +422,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
edata = edata_heap_remove_first(&base->avail[i]);
if (edata != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
if (edata == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
if (edata == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, extent, usize, alignment);
ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
*esn = edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@ -463,16 +463,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
CACHELINE, &esn);
if (extent == NULL) {
if (edata == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
edata_esn_set(edata, esn);
return edata;
}
void

View File

@ -45,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_list_init(&bin->slabs_full);
edata_heap_new(&bin->slabs_nonfull);
edata_list_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}

View File

@ -855,8 +855,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
&astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
accum_atomic_zu(&sdstats->astats.extent_avail,
&astats->astats.extent_avail);
accum_atomic_zu(&sdstats->astats.edata_avail,
&astats->astats.edata_avail);
}
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
@ -2603,18 +2603,18 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
int ret;
unsigned arena_ind;
void *ptr;
extent_t *extent;
edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent == NULL)
edata = iealloc(tsd_tsdn(tsd), ptr);
if (edata == NULL)
goto label_return;
arena = arena_get_from_extent(extent);
arena = arena_get_from_edata(edata);
if (arena == NULL)
goto label_return;
@ -2860,7 +2860,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.edata_avail,
ATOMIC_RELAXED),
size_t)
@ -3010,7 +3010,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->extent_avail_mtx);
MUTEX_PROF_RESET(arena->edata_avail_mtx);
MUTEX_PROF_RESET(arena->eset_dirty.mtx);
MUTEX_PROF_RESET(arena->eset_muzzy.mtx);
MUTEX_PROF_RESET(arena->eset_retained.mtx);

View File

@ -1,6 +1,6 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
ph_gen(, edata_avail_, edata_tree_t, edata_t, ph_link,
edata_esnead_comp)
ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)

View File

@ -200,8 +200,8 @@ ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
tsdn_t *tsdn = tsdn_fetch();
extent_t *a = iealloc(tsdn, addr_a);
extent_t *b = iealloc(tsdn, addr_b);
edata_t *a = iealloc(tsdn, addr_a);
edata_t *b = iealloc(tsdn, addr_b);
if (extent_head_no_merge(a, b)) {
return true;
}

View File

@ -16,10 +16,10 @@ eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&eset->heaps[i]);
edata_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
extent_list_init(&eset->lru);
edata_list_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
eset->delay_coalesce = delay_coalesce;
@ -63,24 +63,24 @@ eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
}
void
eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
assert(extent_state_get(extent) == eset->state);
assert(edata_state_get(edata) == eset->state);
size_t size = extent_size_get(extent);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&eset->heaps[pind])) {
if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_unset(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
extent_heap_insert(&eset->heaps[pind], extent);
edata_heap_insert(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
extent_list_append(&eset->lru, extent);
edata_list_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
@ -94,24 +94,24 @@ eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
}
void
eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
assert(extent_state_get(extent) == eset->state);
assert(edata_state_get(edata) == eset->state);
size_t size = extent_size_get(extent);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&eset->heaps[pind], extent);
edata_heap_remove(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
if (extent_heap_empty(&eset->heaps[pind])) {
if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_set(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
extent_list_remove(&eset->lru, extent);
edata_list_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert_locked, we hold eset->mtx and so don't need atomic
@ -128,7 +128,7 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
@ -139,10 +139,10 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
(pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(!edata_heap_empty(&eset->heaps[i]));
edata_t *edata = edata_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
@ -154,7 +154,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
return edata;
}
}
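The fit test above is plain power-of-two rounding: bump the candidate's base to the next aligned address, and check that what is left past that point still covers min_size. Worked numbers, using the usual idiom behind ALIGNMENT_CEILING:

#include <assert.h>
#include <stdint.h>

/* Round addr up to the next multiple of alignment (a power of two). */
static uintptr_t
align_ceil(uintptr_t addr, uintptr_t alignment) {
    return (addr + alignment - 1) & ~(alignment - 1);
}

int
main(void) {
    uintptr_t base = 0x1234;      /* candidate extent's base address */
    uintptr_t alignment = 0x1000; /* want 4 KiB alignment */
    uintptr_t next_align = align_ceil(base, alignment);
    uintptr_t leadsize = next_align - base;
    assert(next_align == 0x2000 && leadsize == 0xdcc);
    /* A 16 KiB candidate still satisfies a 12 KiB aligned request:
     * 0x4000 - 0xdcc >= 0x3000. */
    assert((uintptr_t)0x4000 - leadsize >= 0x3000);
    return 0;
}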
@ -165,9 +165,9 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
static edata_t *
eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
extent_t *ret = NULL;
edata_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
@ -176,8 +176,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&eset->heaps[pind]) ? NULL :
extent_heap_first(&eset->heaps[pind]);
return edata_heap_empty(&eset->heaps[pind]) ? NULL :
edata_heap_first(&eset->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
@ -185,9 +185,9 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
assert(extent_size_get(extent) >= size);
assert(!edata_heap_empty(&eset->heaps[i]));
edata_t *edata = edata_heap_first(&eset->heaps[i]);
assert(edata_size_get(edata) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large eset for much smaller sizes.
@ -198,8 +198,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
if (ret == NULL || edata_snad_comp(edata, ret) < 0) {
ret = edata;
}
if (i == SC_NPSIZES) {
break;
@ -210,7 +210,7 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
return ret;
}
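eset_first_fit_locked never scans empty heaps: a per-eset bitmap records which size-class heaps are populated, so the smallest sufficient nonempty class is a single find-first-bit query. A toy using the more intuitive set-bit-means-nonempty polarity (the eset bitmap above tracks the inverse, which is why insertion calls bitmap_unset):

#include <assert.h>
#include <stdint.h>

static int
first_nonempty_at_least(uint64_t nonempty, unsigned pind) {
    uint64_t masked = nonempty & (~(uint64_t)0 << pind);
    if (masked == 0) {
        return -1; /* nothing large enough */
    }
    return __builtin_ctzll(masked); /* GCC/Clang builtin */
}

int
main(void) {
    uint64_t nonempty = 0;
    nonempty |= (uint64_t)1 << 3; /* heap 3 holds extents */
    nonempty |= (uint64_t)1 << 9; /* heap 9 holds extents */
    assert(first_nonempty_at_least(nonempty, 2) == 3);
    assert(first_nonempty_at_least(nonempty, 4) == 9);
    assert(first_nonempty_at_least(nonempty, 10) == -1);
    return 0;
}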
extent_t *
edata_t *
eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
@ -220,18 +220,18 @@ eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
return NULL;
}
extent_t *extent = eset_first_fit_locked(tsdn, eset, max_size);
edata_t *edata = eset_first_fit_locked(tsdn, eset, max_size);
if (alignment > PAGE && extent == NULL) {
if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = eset_fit_alignment(eset, esize, max_size, alignment);
edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
return extent;
return edata;
}
void

File diff suppressed because it is too large

View File

@ -109,7 +109,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
edata_t *gap;
cassert(have_dss);
assert(size > 0);
@ -153,7 +153,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena_ind_get(arena),
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, arena_extent_sn_next(arena),
extent_state_active, false, true, true,
@ -194,17 +194,17 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
extent_t extent;
edata_t edata;
ehooks_t *ehooks = arena_get_ehooks(
arena);
extent_init(&extent,
edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
true, EXTENT_NOT_HEAD);
if (extent_purge_forced_wrapper(tsdn,
arena, ehooks, &extent, 0, size)) {
arena, ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}

View File

@ -6,21 +6,21 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
const edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = 0;
*nregs = 1;
} else {
*nfree = extent_nfree_get(extent);
*nregs = bin_infos[extent_szind_get(extent)].nregs;
*nfree = edata_nfree_get(edata);
*nregs = bin_infos[edata_szind_get(edata)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
assert(*nfree * edata_usize_get(edata) <= *size);
}
}
@ -31,31 +31,31 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
const edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
*nfree = extent_nfree_get(extent);
const szind_t szind = extent_szind_get(extent);
*nfree = edata_nfree_get(edata);
const szind_t szind = edata_szind_get(edata);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
assert(*nfree * edata_usize_get(edata) <= *size);
const arena_t *arena = (arena_t *)atomic_load_p(
&arenas[extent_arena_ind_get(extent)], ATOMIC_RELAXED);
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
const unsigned binshard = extent_binshard_get(extent);
const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[szind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
@ -66,12 +66,12 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
} else {
*bin_nfree = *bin_nregs = 0;
}
extent_t *slab;
edata_t *slab;
if (bin->slabcur != NULL) {
slab = bin->slabcur;
} else {
slab = extent_heap_first(&bin->slabs_nonfull);
slab = edata_heap_first(&bin->slabs_nonfull);
}
*slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
malloc_mutex_unlock(tsdn, &bin->lock);
}
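What a caller does with the triple these functions fill in is simple arithmetic; a sketch, with the large-allocation convention (nfree = 0, nregs = 1) reading naturally as fully utilized:

#include <assert.h>
#include <stddef.h>

/* Utilization of the extent backing a pointer: used regions over total
 * regions, from the nfree/nregs pair reported above. */
static double
toy_util(size_t nfree, size_t nregs) {
    assert(nregs > 0 && nfree <= nregs);
    return (double)(nregs - nfree) / (double)nregs;
}

int
main(void) {
    /* e.g. a 512-region slab with 128 regions free is 75% utilized. */
    assert(toy_util(128, 512) == 0.75);
    /* A large (non-slab) allocation reports nfree=0, nregs=1: 100%. */
    assert(toy_util(0, 1) == 1.0);
    return 0;
}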

View File

@ -21,7 +21,7 @@ void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
extent_t *extent;
edata_t *edata;
bool is_zeroed;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
@ -44,28 +44,28 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
/* Insert extent into large. */
/* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
extent_list_append(&arena->large, extent);
edata_list_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
if (zero) {
assert(is_zeroed);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
extent_usize_get(extent));
memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
edata_usize_get(edata));
}
arena_decay_tick(tsdn, arena);
return extent_addr_get(extent);
return edata_addr_get(edata);
}
static void
@ -90,11 +90,11 @@ large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
large_dalloc_maybe_junk_impl;
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_t *arena = arena_get_from_extent(extent);
size_t oldusize = extent_usize_get(extent);
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata);
size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
size_t diff = edata_size_get(edata) - (usize + sz_large_pad);
assert(oldusize > usize);
@ -104,31 +104,31 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
ehooks, extent, usize + sz_large_pad, sz_size2index(usize),
edata_t *trail = extent_split_wrapper(tsdn, arena,
ehooks, edata, usize + sz_large_pad, sz_size2index(usize),
false, diff, SC_NSIZES, false);
if (trail == NULL) {
return true;
}
if (config_fill && unlikely(opt_junk_free)) {
large_dalloc_maybe_junk(extent_addr_get(trail),
extent_size_get(trail));
large_dalloc_maybe_junk(edata_addr_get(trail),
edata_size_get(trail));
}
arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
}
arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);
return false;
}
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
arena_t *arena = arena_get_from_extent(extent);
size_t oldusize = extent_usize_get(extent);
arena_t *arena = arena_get_from_edata(edata);
size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t trailsize = usize - oldusize;
@ -147,20 +147,20 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
*/
bool is_zeroed_trail = zero;
bool commit = true;
extent_t *trail;
edata_t *trail;
bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, ehooks,
extent_past_get(extent), trailsize, 0, CACHELINE, false,
edata_past_get(edata), trailsize, 0, CACHELINE, false,
SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true;
}
@ -169,16 +169,16 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
}
if (extent_merge_wrapper(tsdn, arena, ehooks, extent, trail)) {
if (extent_merge_wrapper(tsdn, arena, ehooks, edata, trail)) {
extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
return true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, false);
(uintptr_t)edata_addr_get(edata), szind, false);
if (config_stats && new_mapping) {
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
@ -194,7 +194,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((uintptr_t)extent_addr_get(extent) + oldusize);
((uintptr_t)edata_addr_get(edata) + oldusize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
@ -203,19 +203,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
assert(is_zeroed_trail);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
return false;
}
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
size_t oldusize = extent_usize_get(extent);
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@ -225,16 +225,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
large_ralloc_no_move_expand(tsdn, extent, usize_min,
zero)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@ -244,14 +243,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@ -271,9 +270,9 @@ void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
extent_t *extent = iealloc(tsdn, ptr);
edata_t *edata = iealloc(tsdn, ptr);
size_t oldusize = extent_usize_get(extent);
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
@ -281,11 +280,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
return extent_addr_get(extent);
return edata_addr_get(edata);
}
/*
@ -306,8 +305,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
memcpy(ret, edata_addr_get(edata), copysize);
isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
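large_ralloc above is the classic resize shape: attempt in place, and only then allocate-copy-free with min(old, new) bytes copied. The same skeleton as a runnable toy, with plain malloc/free standing in for the arena paths and an in-place attempt that always fails:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

static int
toy_resize_in_place(void *ptr, size_t usize) {
    (void)ptr; (void)usize;
    return 1; /* nonzero == failure, mirroring large_ralloc_no_move */
}

static void *
toy_ralloc(void *ptr, size_t oldusize, size_t usize) {
    if (!toy_resize_in_place(ptr, usize)) {
        return ptr; /* fast path: no move, no copy */
    }
    void *ret = malloc(usize);
    if (ret == NULL) {
        return NULL; /* old allocation left intact */
    }
    size_t copysize = (usize < oldusize) ? usize : oldusize;
    memcpy(ret, ptr, copysize);
    free(ptr);
    return ret;
}

int
main(void) {
    char *p = malloc(32);
    assert(p != NULL);
    memset(p, 'x', 32);
    p = toy_ralloc(p, 32, 64);
    assert(p != NULL && p[31] == 'x'); /* contents survived the move */
    free(p);
    return 0;
}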
@ -316,76 +315,75 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
* whether the arena's large_mtx is currently held.
*/
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool junked_locked) {
if (!junked_locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
extent_list_remove(&arena->large, extent);
edata_list_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
large_dalloc_maybe_junk(extent_addr_get(extent),
extent_usize_get(extent));
large_dalloc_maybe_junk(edata_addr_get(edata),
edata_usize_get(edata));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
extent_list_remove(&arena->large, extent);
edata_list_remove(&arena->large, edata);
}
}
arena_extent_dalloc_large_prep(tsdn, arena, extent);
arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
ehooks_t *ehooks = arena_get_ehooks(arena);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, extent);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata);
}
void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_prep_impl(tsdn, arena_get_from_extent(extent), extent,
true);
large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_finish_impl(tsdn, arena_get_from_extent(extent), extent);
large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
arena_t *arena = arena_get_from_extent(extent);
large_dalloc_prep_impl(tsdn, arena, extent, false);
large_dalloc_finish_impl(tsdn, arena, extent);
large_dalloc(tsdn_t *tsdn, edata_t *edata) {
arena_t *arena = arena_get_from_edata(edata);
large_dalloc_prep_impl(tsdn, arena, edata, false);
large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
return extent_usize_get(extent);
large_salloc(tsdn_t *tsdn, const edata_t *edata) {
return edata_usize_get(edata);
}
void
large_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
extent_prof_info_get(extent, prof_info);
large_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
edata_prof_info_get(edata, prof_info);
}
static void
large_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
extent_prof_tctx_set(extent, tctx);
large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
edata_prof_tctx_set(edata, tctx);
}
void
large_prof_tctx_reset(extent_t *extent) {
large_prof_tctx_set(extent, (prof_tctx_t *)(uintptr_t)1U);
large_prof_tctx_reset(edata_t *edata) {
large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
large_prof_info_set(extent_t *extent, prof_tctx_t *tctx) {
large_prof_tctx_set(extent, tctx);
large_prof_info_set(edata_t *edata, prof_tctx_t *tctx) {
large_prof_tctx_set(edata, tctx);
nstime_t t;
nstime_init_update(&t);
extent_prof_alloc_time_set(extent, &t);
edata_prof_alloc_time_set(edata, &t);
}

View File

@@ -114,8 +114,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 
 /* Enabled with --enable-extra-size-check. */
 static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
-    size_t nflush, extent_t **extents){
+tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
+    size_t nflush, edata_t **edatas){
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -129,9 +129,9 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
 	size_t sz_sum = binind * nflush;
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 	for (unsigned i = 0 ; i < nflush; i++) {
-		rtree_extent_szind_read(tsdn, &extents_rtree,
+		rtree_edata_szind_read(tsdn, &extents_rtree,
 		    rtree_ctx, (uintptr_t)*(bottom_item - i), true,
-		    &extents[i], &szind);
+		    &edatas[i], &szind);
 		sz_sum -= szind;
 	}
 	if (sz_sum != 0) {
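The check above folds all nflush lookups into a single running sum: sz_sum starts at binind * nflush, each rtree-reported szind is subtracted, and any cached pointer whose mapped size class disagrees with the bin leaves the sum nonzero. It is deliberately cheap rather than exhaustive; two errors that offset each other cancel out. A runnable model of the invariant (standalone, with illustrative names):

    #include <assert.h>
    #include <stddef.h>

    /* Model of the flush-time size check: one subtraction per item. */
    static int
    sizes_consistent(size_t binind, const size_t *szinds, size_t nflush) {
        size_t sz_sum = binind * nflush;
        for (size_t i = 0; i < nflush; i++) {
            sz_sum -= szinds[i]; /* a correct item contributes exactly binind */
        }
        return sz_sum == 0;
    }

    int
    main(void) {
        size_t ok[] = {3, 3, 3};
        size_t bad[] = {3, 7, 3};
        size_t cancel[] = {2, 4, 3}; /* offsetting errors evade a sum check */
        assert(sizes_consistent(3, ok, 3));
        assert(!sizes_consistent(3, bad, 3));
        assert(sizes_consistent(3, cancel, 3));
        return 0;
    }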
@@ -154,26 +154,26 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	arena_t *arena = tcache->arena;
 	assert(arena != NULL);
 	unsigned nflush = ncached - rem;
-	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+	VARIABLE_ARRAY(edata_t *, item_edata, nflush);
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 
-	/* Look up extent once per item. */
+	/* Look up edata once per item. */
 	if (config_opt_safety_checks) {
-		tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
-		    nflush, item_extent);
+		tbin_edatas_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
+		    nflush, item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
-			item_extent[i] = iealloc(tsd_tsdn(tsd),
+			item_edata[i] = iealloc(tsd_tsdn(tsd),
 			    *(bottom_item - i));
 		}
 	}
 
 	while (nflush > 0) {
 		/* Lock the arena bin associated with the first object. */
-		extent_t *extent = item_extent[0];
-		unsigned bin_arena_ind = extent_arena_ind_get(extent);
+		edata_t *edata = item_edata[0];
+		unsigned bin_arena_ind = edata_arena_ind_get(edata);
 		arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
 		    false);
-		unsigned binshard = extent_binshard_get(extent);
+		unsigned binshard = edata_binshard_get(edata);
 		assert(binshard < bin_infos[binind].n_shards);
 		bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
@@ -187,13 +187,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
-			extent = item_extent[i];
-			assert(ptr != NULL && extent != NULL);
+			edata = item_edata[i];
+			assert(ptr != NULL && edata != NULL);
 
-			if (extent_arena_ind_get(extent) == bin_arena_ind
-			    && extent_binshard_get(extent) == binshard) {
+			if (edata_arena_ind_get(edata) == bin_arena_ind
+			    && edata_binshard_get(edata) == binshard) {
 				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, bin, binind, extent, ptr);
+				    bin_arena, bin, binind, edata, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -202,7 +202,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 				 * handled in a future pass.
 				 */
 				*(bottom_item - ndeferred) = ptr;
-				item_extent[ndeferred] = extent;
+				item_edata[ndeferred] = edata;
 				ndeferred++;
 			}
 		}
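The loop structure above is the heart of the flush: lock the bin owned by the first item's arena and shard, free every item that belongs to it, and compact the rest to the front of the array for the next pass, so locks are taken once per distinct owner rather than once per item. A minimal standalone model of that partition (illustrative, not jemalloc source):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { unsigned owner; int freed; } item_t;

    static void
    flush_all(item_t **items, size_t n) {
        while (n > 0) {
            unsigned owner = items[0]->owner; /* "lock" the first item's owner */
            size_t ndeferred = 0;
            for (size_t i = 0; i < n; i++) {
                if (items[i]->owner == owner) {
                    items[i]->freed = 1; /* dalloc under the lock */
                } else {
                    /* Wrong owner: keep for a future pass. */
                    items[ndeferred++] = items[i];
                }
            }
            n = ndeferred; /* one pass, and one lock, per distinct owner */
        }
    }

    int
    main(void) {
        item_t a = {0, 0}, b = {1, 0}, c = {0, 0};
        item_t *items[] = {&a, &b, &c};
        flush_all(items, 3);
        assert(a.freed && b.freed && c.freed);
        return 0;
    }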
@@ -244,22 +244,22 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 	arena_t *tcache_arena = tcache->arena;
 	assert(tcache_arena != NULL);
 	unsigned nflush = ncached - rem;
-	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+	VARIABLE_ARRAY(edata_t *, item_edata, nflush);
 	void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
 
 #ifndef JEMALLOC_EXTRA_SIZE_CHECK
-	/* Look up extent once per item. */
+	/* Look up edata once per item. */
 	for (unsigned i = 0 ; i < nflush; i++) {
-		item_extent[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
+		item_edata[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
 	}
 #else
-	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
-	    item_extent);
+	tbin_edatas_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
+	    item_edata);
 #endif
 
 	while (nflush > 0) {
 		/* Lock the arena associated with the first object. */
-		extent_t *extent = item_extent[0];
-		unsigned locked_arena_ind = extent_arena_ind_get(extent);
+		edata_t *edata = item_edata[0];
+		unsigned locked_arena_ind = edata_arena_ind_get(edata);
 		arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
 		    locked_arena_ind, false);
@@ -270,10 +270,10 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
 			assert(ptr != NULL);
-			extent = item_extent[i];
-			if (extent_arena_ind_get(extent) == locked_arena_ind) {
+			edata = item_edata[i];
+			if (edata_arena_ind_get(edata) == locked_arena_ind) {
 				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
-				    extent);
+				    edata);
 			}
 		}
 		if ((config_prof || config_stats) &&
@@ -293,11 +293,11 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(bottom_item - i);
-			extent = item_extent[i];
-			assert(ptr != NULL && extent != NULL);
+			edata = item_edata[i];
+			assert(ptr != NULL && edata != NULL);
 
-			if (extent_arena_ind_get(extent) == locked_arena_ind) {
-				large_dalloc_finish(tsd_tsdn(tsd), extent);
+			if (edata_arena_ind_get(edata) == locked_arena_ind) {
+				large_dalloc_finish(tsd_tsdn(tsd), edata);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -306,7 +306,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 				 * in a future pass.
 				 */
 				*(bottom_item - ndeferred) = ptr;
-				item_extent[ndeferred] = extent;
+				item_edata[ndeferred] = edata;
 				ndeferred++;
 			}
 		}
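Unlike the small path, the large path splits each deallocation across the two loops above: large_dalloc_prep_junked_locked() does the list and stats bookkeeping while the arena is locked, and large_dalloc_finish() returns the pages afterward, keeping the slow unmapping work out of the critical section. A compact model of that split (a sketch; the mutex and helpers are stand-ins):

    #include <assert.h>
    #include <pthread.h>

    typedef struct { int prepped; int finished; } big_t;

    static pthread_mutex_t arena_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    prep(big_t *b) { /* metadata updates: must hold the lock */
        b->prepped = 1;
    }

    static void
    finish(big_t *b) { /* page return: safe without the lock */
        assert(b->prepped);
        b->finished = 1;
    }

    int
    main(void) {
        big_t items[3] = {{0, 0}, {0, 0}, {0, 0}};
        pthread_mutex_lock(&arena_mtx);
        for (int i = 0; i < 3; i++) prep(&items[i]); /* phase 1: batched */
        pthread_mutex_unlock(&arena_mtx); /* critical section stays short */
        for (int i = 0; i < 3; i++) finish(&items[i]); /* phase 2: unlocked */
        for (int i = 0; i < 3; i++) assert(items[i].finished);
        return 0;
    }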

View File

@@ -63,17 +63,17 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-	extent_t *extent;
+	edata_t *edata;
 	szind_t szind;
-	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)ptr, false, &extent, &szind)) {
+	if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	    (uintptr_t)ptr, false, &edata, &szind)) {
 		return 0;
 	}
 
-	if (extent == NULL) {
+	if (edata == NULL) {
 		return 0;
 	}
-	if (extent_state_get(extent) != extent_state_active) {
+	if (edata_state_get(edata) != extent_state_active) {
 		return 0;
 	}
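vsalloc() is a validating size lookup: each early return above turns a suspicious pointer into 0 rather than undefined behavior, with the last test rejecting mappings whose edata is no longer active. A standalone model of the same ladder (the enum and struct are stand-ins for the real rtree/edata machinery):

    #include <assert.h>
    #include <stddef.h>

    typedef enum { STATE_ACTIVE, STATE_DIRTY } state_t;
    typedef struct { state_t state; size_t usize; } meta_t; /* edata stand-in */

    static size_t
    vsalloc_model(const meta_t *meta) {
        if (meta == NULL) {
            return 0; /* address not mapped by the allocator */
        }
        if (meta->state != STATE_ACTIVE) {
            return 0; /* mapped, but not a live allocation */
        }
        return meta->usize;
    }

    int
    main(void) {
        meta_t live = {STATE_ACTIVE, 4096};
        meta_t dead = {STATE_DIRTY, 4096};
        assert(vsalloc_model(NULL) == 0);
        assert(vsalloc_model(&dead) == 0);
        assert(vsalloc_model(&live) == 4096);
        return 0;
    }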

View File

@@ -168,14 +168,14 @@ TEST_BEGIN(test_base_hooks_not_null) {
 	 * that the first block's remaining space is considered for subsequent
 	 * allocation.
 	 */
-	assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
+	assert_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
 	    "Remainder insufficient for test");
 	/* Use up all but one quantum of block. */
-	while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
+	while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
 		p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
 		assert_ptr_not_null(p, "Unexpected base_alloc() failure");
 	}
-	r_exp = extent_addr_get(&base->blocks->extent);
+	r_exp = edata_addr_get(&base->blocks->edata);
 	assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
 	q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
 	assert_ptr_not_null(q, "Unexpected base_alloc() failure");
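The test drives base's bump-style block reuse: requests are carved from the current block's remainder until exactly one quantum is left, at which point a larger request cannot be satisfied in place. A self-contained model of that accounting (illustrative only; the real base_alloc also handles alignment and allocates fresh blocks):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { size_t bsize; size_t used; } block_t;

    static size_t
    block_remaining(const block_t *b) {
        return b->bsize - b->used;
    }

    static int
    block_alloc(block_t *b, size_t size) {
        if (block_remaining(b) < size) {
            return 0; /* caller must fall back to a new block */
        }
        b->used += size; /* bump; no per-allocation header */
        return 1;
    }

    int
    main(void) {
        block_t b = {64, 0};
        size_t quantum = 16;
        /* Use up all but one quantum of the block, as in the test. */
        while (block_remaining(&b) > quantum) {
            assert(block_alloc(&b, quantum));
        }
        assert(block_remaining(&b) == quantum);
        assert(!block_alloc(&b, quantum + 1)); /* too big for the remainder */
        return 0;
    }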

View File

@@ -53,7 +53,7 @@ TEST_END
 static void *
 thd_start(void *varg) {
 	void *ptr, *ptr2;
-	extent_t *extent;
+	edata_t *edata;
 	unsigned shard1, shard2;
 
 	tsdn_t *tsdn = tsdn_fetch();
@@ -62,13 +62,13 @@ thd_start(void *varg) {
 	ptr = mallocx(1, MALLOCX_TCACHE_NONE);
 	ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
 
-	extent = iealloc(tsdn, ptr);
-	shard1 = extent_binshard_get(extent);
+	edata = iealloc(tsdn, ptr);
+	shard1 = edata_binshard_get(edata);
 	dallocx(ptr, 0);
 	assert_u_lt(shard1, 16, "Unexpected bin shard used");
 
-	extent = iealloc(tsdn, ptr2);
-	shard2 = extent_binshard_get(extent);
+	edata = iealloc(tsdn, ptr2);
+	shard2 = edata_binshard_get(edata);
 	dallocx(ptr2, 0);
 	assert_u_lt(shard2, 4, "Unexpected bin shard used");

View File

@@ -75,8 +75,8 @@ TEST_BEGIN(test_rtree_read_empty) {
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
-	assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
-	    false), "rtree_extent_read() should return NULL for empty tree");
+	assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
+	    false), "rtree_edata_read() should return NULL for empty tree");
 
 	rtree_delete(tsdn, rtree);
 }
 TEST_END
@@ -86,11 +86,11 @@ TEST_END
 #undef SEED
 
 TEST_BEGIN(test_rtree_extrema) {
-	extent_t extent_a, extent_b;
-	extent_init(&extent_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
+	edata_t edata_a, edata_b;
+	edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
 	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
-	extent_init(&extent_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	tsdn_t *tsdn = tsdn_fetch();
@@ -100,21 +100,21 @@ TEST_BEGIN(test_rtree_extrema) {
 	rtree_ctx_data_init(&rtree_ctx);
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
 
-	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
-	    extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
+	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
+	    edata_szind_get(&edata_a), edata_slab_get(&edata_a)),
 	    "Unexpected rtree_write() failure");
 	rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
-	    extent_szind_get(&extent_a), extent_slab_get(&extent_a));
-	assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true),
-	    &extent_a,
-	    "rtree_extent_read() should return previously set value");
+	    edata_szind_get(&edata_a), edata_slab_get(&edata_a));
+	assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE, true),
+	    &edata_a,
+	    "rtree_edata_read() should return previously set value");
 
 	assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
-	    &extent_b, extent_szind_get_maybe_invalid(&extent_b),
-	    extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
-	assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-	    ~((uintptr_t)0), true), &extent_b,
-	    "rtree_extent_read() should return previously set value");
+	    &edata_b, edata_szind_get_maybe_invalid(&edata_b),
+	    edata_slab_get(&edata_b)), "Unexpected rtree_write() failure");
+	assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+	    ~((uintptr_t)0), true), &edata_b,
+	    "rtree_edata_read() should return previously set value");
 
 	rtree_delete(tsdn, rtree);
 }
@@ -126,8 +126,8 @@ TEST_BEGIN(test_rtree_bits) {
 	uintptr_t keys[] = {PAGE, PAGE + 1,
 	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
 
-	extent_t extent;
-	extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_t edata;
+	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	rtree_t *rtree = &test_rtree;
@@ -137,17 +137,17 @@ TEST_BEGIN(test_rtree_bits) {
 	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
 		assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
-		    &extent, SC_NSIZES, false),
+		    &edata, SC_NSIZES, false),
 		    "Unexpected rtree_write() failure");
 		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-			assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-			    keys[j], true), &extent,
-			    "rtree_extent_read() should return previously set "
+			assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+			    keys[j], true), &edata,
+			    "rtree_edata_read() should return previously set "
 			    "value and ignore insignificant key bits; i=%u, "
 			    "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
 			    j, keys[i], keys[j]);
 		}
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    (((uintptr_t)2) << LG_PAGE), false),
 		    "Only leftmost rtree leaf should be set; i=%u", i);
 		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
@@ -167,8 +167,8 @@ TEST_BEGIN(test_rtree_random) {
 	rtree_ctx_t rtree_ctx;
 	rtree_ctx_data_init(&rtree_ctx);
 
-	extent_t extent;
-	extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+	edata_t edata;
+	edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@@ -179,29 +179,29 @@ TEST_BEGIN(test_rtree_random) {
 		    &rtree_ctx, keys[i], false, true);
 		assert_ptr_not_null(elm,
 		    "Unexpected rtree_leaf_elm_lookup() failure");
-		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+		rtree_leaf_elm_write(tsdn, rtree, elm, &edata, SC_NSIZES,
 		    false);
-		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-		    keys[i], true), &extent,
-		    "rtree_extent_read() should return previously set value");
+		assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+		    keys[i], true), &edata,
+		    "rtree_edata_read() should return previously set value");
 	}
 	for (unsigned i = 0; i < NSET; i++) {
-		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
-		    keys[i], true), &extent,
-		    "rtree_extent_read() should return previously set value, "
+		assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+		    keys[i], true), &edata,
+		    "rtree_edata_read() should return previously set value, "
 		    "i=%u", i);
 	}
 
 	for (unsigned i = 0; i < NSET; i++) {
 		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    keys[i], true),
-		    "rtree_extent_read() should return previously set value");
+		    "rtree_edata_read() should return previously set value");
 	}
 	for (unsigned i = 0; i < NSET; i++) {
-		assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+		assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
 		    keys[i], true),
-		    "rtree_extent_read() should return previously set value");
+		    "rtree_edata_read() should return previously set value");
 	}
 
 	rtree_delete(tsdn, rtree);
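test_rtree_bits pins down why PAGE, PAGE + 1, and PAGE + page_size - 1 all resolve to the same edata: rtree keys are page-granular, so an address's low LG_PAGE bits are insignificant. A tiny model of the key derivation (assuming 4 KiB pages for the sketch):

    #include <assert.h>
    #include <stdint.h>

    #define LG_PAGE 12 /* illustrative page size */

    static uintptr_t
    rtree_key_model(uintptr_t addr) {
        return addr >> LG_PAGE; /* one leaf per page */
    }

    int
    main(void) {
        uintptr_t page = (uintptr_t)1 << LG_PAGE;
        assert(rtree_key_model(page) == rtree_key_model(page + 1));
        assert(rtree_key_model(page) ==
            rtree_key_model(page + ((uintptr_t)1 << LG_PAGE) - 1));
        assert(rtree_key_model(page) !=
            rtree_key_model(page + ((uintptr_t)1 << LG_PAGE)));
        return 0;
    }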

View File

@@ -7,24 +7,24 @@ TEST_BEGIN(test_arena_slab_regind) {
 	for (binind = 0; binind < SC_NBINS; binind++) {
 		size_t regind;
-		extent_t slab;
+		edata_t slab;
 		const bin_info_t *bin_info = &bin_infos[binind];
-		extent_init(&slab, INVALID_ARENA_IND,
+		edata_init(&slab, INVALID_ARENA_IND,
 		    mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
 		    bin_info->slab_size, true,
 		    binind, 0, extent_state_active, false, true, true,
 		    EXTENT_NOT_HEAD);
-		assert_ptr_not_null(extent_addr_get(&slab),
+		assert_ptr_not_null(edata_addr_get(&slab),
 		    "Unexpected malloc() failure");
 
 		for (regind = 0; regind < bin_info->nregs; regind++) {
-			void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
+			void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
 			    (bin_info->reg_size * regind));
 			assert_zu_eq(arena_slab_regind(&slab, binind, reg),
 			    regind,
 			    "Incorrect region index computed for size %zu",
 			    bin_info->reg_size);
 		}
-		free(extent_addr_get(&slab));
+		free(edata_addr_get(&slab));
 	}
 }
 TEST_END
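The relation this test enforces is plain offset arithmetic: region regind begins at slab address plus regind * reg_size, so the index is recoverable as the offset divided by the region size, whatever shortcut arena_slab_regind() takes internally. A standalone check of that relation:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t
    regind_model(uintptr_t slab_addr, size_t reg_size, uintptr_t ptr) {
        return (size_t)((ptr - slab_addr) / reg_size); /* offset / region size */
    }

    int
    main(void) {
        uintptr_t slab = 0x10000;
        size_t reg_size = 48; /* e.g., a 48-byte size class */
        for (size_t regind = 0; regind < 100; regind++) {
            uintptr_t reg = slab + regind * reg_size;
            assert(regind_model(slab, reg_size, reg) == regind);
        }
        return 0;
    }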