Rename extent_t to edata_t.

This frees us up from the unfortunate extent/extent2 naming collision.
David Goldblatt
2019-12-09 14:36:45 -08:00
committed by David Goldblatt
parent 865debda22
commit a7862df616
32 changed files with 1200 additions and 1208 deletions
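The rename is mechanical: the metadata struct formerly named extent_t becomes edata_t, and its accessors keep their suffixes while the extent_ prefix becomes edata_ (extent_size_get -> edata_size_get, extent_list_append -> edata_list_append, and so on), with behavior unchanged. Below is a minimal standalone sketch of that naming pattern; the struct layout, field names, and main() driver are illustrative stand-ins only, not jemalloc's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the renamed metadata type (not the real layout). */
typedef struct edata_s {
	void	*addr;
	size_t	size;
} edata_t;

/* Old call sites: extent_addr_get(extent), extent_size_get(extent), ... */
/* New call sites: edata_addr_get(edata),   edata_size_get(edata),   ... */
static inline void *
edata_addr_get(const edata_t *edata) {
	return edata->addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
	return edata->size;
}

int
main(void) {
	char buf[4096];
	edata_t edata = {buf, sizeof(buf)};

	/* Same data, same accessors; only the prefix changed. */
	printf("addr=%p size=%zu\n", edata_addr_get(&edata),
	    edata_size_get(&edata));
	return 0;
}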


@@ -60,9 +60,9 @@ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
/******************************************************************************/
@@ -102,8 +102,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_stats_accum_zu(&astats->retained,
eset_npages_get(&arena->eset_retained) << LG_PAGE);
atomic_store_zu(&astats->extent_avail,
atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
atomic_store_zu(&astats->edata_avail,
atomic_load_zu(&arena->edata_avail_cnt, ATOMIC_RELAXED),
ATOMIC_RELAXED);
arena_stats_accum_u64(&astats->decay_dirty.npurge,
@@ -224,7 +224,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
READ_ARENA_MUTEX_PROF_DATA(edata_avail_mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(eset_dirty.mtx,
arena_prof_mutex_extents_dirty)
@@ -254,11 +254,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_t *extent) {
edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, extent);
extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata);
if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true);
} else {
@@ -267,34 +267,34 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
}
static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret;
slab_data_t *slab_data = extent_slab_data_get(slab);
slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind;
assert(extent_nfree_get(slab) > 0);
assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)extent_addr_get(slab) +
ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
extent_nfree_dec(slab);
edata_nfree_dec(slab);
return ret;
}
static void
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) {
slab_data_t *slab_data = extent_slab_data_get(slab);
slab_data_t *slab_data = edata_slab_data_get(slab);
assert(extent_nfree_get(slab) >= cnt);
assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info);
*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
}
#else
@@ -315,7 +315,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* hot loop below.
*/
uintptr_t base = (uintptr_t)extent_addr_get(slab);
uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) {
size_t bit = cfs_lu(&g);
@@ -327,24 +327,24 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g;
}
#endif
extent_nfree_sub(slab, cnt);
edata_nfree_sub(slab, cnt);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
/* Avoid doing division with a variable divisor. */
regind = div_compute(&arena_binind_div_info[binind], diff);
@@ -355,17 +355,17 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
}
static void
arena_slab_reg_dalloc(extent_t *slab, slab_data_t *slab_data, void *ptr) {
szind_t binind = extent_szind_get(slab);
arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
assert(extent_nfree_get(slab) < bin_info->nregs);
assert(edata_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
extent_nfree_inc(slab);
edata_nfree_inc(slab);
}
static void
@@ -423,7 +423,7 @@ arena_may_have_muzzy(arena_t *arena) {
return arena_muzzy_decay_ms_get(arena) != 0;
}
extent_t *
edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero) {
ehooks_t *ehooks = arena_get_ehooks(arena);
@@ -434,23 +434,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
szind_t szind = sz_size2index(usize);
size_t mapped_add;
bool commit = true;
extent_t *extent = extents_alloc(tsdn, arena, ehooks,
&arena->eset_dirty, NULL, usize, sz_large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL && arena_may_have_muzzy(arena)) {
extent = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
edata_t *edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit);
if (edata == NULL && arena_may_have_muzzy(arena)) {
edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
NULL, usize, sz_large_pad, alignment, false, szind, zero,
&commit);
}
size_t size = usize + sz_large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
if (edata == NULL) {
edata = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
sz_large_pad, alignment, false, szind, zero, &commit);
if (config_stats) {
/*
* extent may be NULL on OOM, but in that case
* mapped_add isn't used below, so there's no need to
* conditionally set it to 0 here.
* edata may be NULL on OOM, but in that case mapped_add
* isn't used below, so there's no need to conditionally
* set it to 0 here.
*/
mapped_add = size;
}
@@ -458,7 +457,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
mapped_add = 0;
}
if (extent != NULL) {
if (edata != NULL) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_malloc_stats_update(tsdn, arena, usize);
@@ -471,24 +470,24 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, size >> LG_PAGE);
}
return extent;
return edata;
}
void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_dalloc_stats_update(tsdn, arena,
extent_usize_get(extent));
edata_usize_get(edata));
arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
arena_nactive_sub(arena, edata_size_get(edata) >> LG_PAGE);
}
void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t usize = edata_usize_get(edata);
size_t udiff = oldusize - usize;
if (config_stats) {
@@ -500,9 +499,9 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t usize = edata_usize_get(edata);
size_t udiff = usize - oldusize;
if (config_stats) {
@@ -819,25 +818,25 @@ arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
ehooks_t *ehooks, eset_t *eset, size_t npages_limit,
size_t npages_decay_max, extent_list_t *decay_extents) {
size_t npages_decay_max, edata_list_t *decay_extents) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
extent_t *extent;
edata_t *edata;
while (nstashed < npages_decay_max &&
(extent = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
(edata = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
!= NULL) {
extent_list_append(decay_extents, extent);
nstashed += extent_size_get(extent) >> LG_PAGE;
edata_list_append(decay_extents, edata);
nstashed += edata_size_get(edata) >> LG_PAGE;
}
return nstashed;
}
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
arena_decay_t *decay, eset_t *eset, bool all, extent_list_t *decay_extents,
arena_decay_t *decay, eset_t *eset, bool all, edata_list_t *decay_extents,
bool is_background_thread) {
size_t nmadvise, nunmapped;
size_t npurged;
@@ -849,31 +848,30 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
npurged = 0;
ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
for (extent_t *extent = extent_list_first(decay_extents); extent !=
NULL; extent = extent_list_first(decay_extents)) {
for (edata_t *edata = edata_list_first(decay_extents); edata !=
NULL; edata = edata_list_first(decay_extents)) {
if (config_stats) {
nmadvise++;
}
size_t npages = extent_size_get(extent) >> LG_PAGE;
size_t npages = edata_size_get(edata) >> LG_PAGE;
npurged += npages;
extent_list_remove(decay_extents, extent);
edata_list_remove(decay_extents, edata);
switch (eset_state_get(eset)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (!all && muzzy_decay_ms != 0 &&
!extent_purge_lazy_wrapper(tsdn, arena,
ehooks, extent, 0,
extent_size_get(extent))) {
ehooks, edata, 0, edata_size_get(edata))) {
extents_dalloc(tsdn, arena, ehooks,
&arena->eset_muzzy, extent);
&arena->eset_muzzy, edata);
arena_background_thread_inactivity_check(tsdn,
arena, is_background_thread);
break;
}
JEMALLOC_FALLTHROUGH;
case extent_state_muzzy:
extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
if (config_stats) {
nunmapped += npages;
}
@@ -923,8 +921,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
ehooks_t *ehooks = arena_get_ehooks(arena);
extent_list_t decay_extents;
extent_list_init(&decay_extents);
edata_list_t decay_extents;
edata_list_init(&decay_extents);
size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, eset,
npages_limit, npages_decay_max, &decay_extents);
@@ -1000,33 +998,33 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
}
static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
ehooks_t *ehooks = arena_get_ehooks(arena);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab);
}
static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
assert(extent_nfree_get(slab) > 0);
extent_heap_insert(&bin->slabs_nonfull, slab);
arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
assert(edata_nfree_get(slab) > 0);
edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
extent_heap_remove(&bin->slabs_nonfull, slab);
arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
static extent_t *
static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return NULL;
}
@@ -1038,30 +1036,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
assert(extent_nfree_get(slab) == 0);
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
assert(edata_nfree_get(slab) == 0);
/*
* Tracking extents is required by arena_reset, which is not allowed
* for auto arenas. Bypass this step to avoid touching the extent
* for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas.
*/
if (arena_is_auto(arena)) {
return;
}
extent_list_append(&bin->slabs_full, slab);
edata_list_append(&bin->slabs_full, slab);
}
static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
extent_list_remove(&bin->slabs_full, slab);
edata_list_remove(&bin->slabs_full, slab);
}
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
extent_t *slab;
edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) {
@@ -1071,13 +1069,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
slab = extent_list_first(&bin->slabs_full)) {
for (slab = edata_list_first(&bin->slabs_full); slab != NULL;
slab = edata_list_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1109,9 +1107,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
for (extent_t *extent = extent_list_first(&arena->large); extent !=
NULL; extent = extent_list_first(&arena->large)) {
void *ptr = extent_base_get(extent);
for (edata_t *edata = edata_list_first(&arena->large); edata !=
NULL; edata = edata_list_first(&arena->large)) {
void *ptr = edata_base_get(edata);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1129,7 +1127,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx);
}
large_dalloc(tsd_tsdn(tsd), extent);
large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1157,10 +1155,10 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
* dss-based extents for later reuse.
*/
ehooks_t *ehooks = arena_get_ehooks(arena);
extent_t *extent;
while ((extent = extents_evict(tsdn, arena, ehooks,
edata_t *edata;
while ((edata = extents_evict(tsdn, arena, ehooks,
&arena->eset_retained, 0)) != NULL) {
extent_destroy_wrapper(tsdn, arena, ehooks, extent);
extent_destroy_wrapper(tsdn, arena, ehooks, edata);
}
}
@@ -1200,10 +1198,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
base_delete(tsd_tsdn(tsd), arena->base);
}
static extent_t *
static edata_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
const bin_info_t *bin_info, szind_t szind) {
extent_t *slab;
edata_t *slab;
bool zero, commit;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1222,7 +1220,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
return slab;
}
static extent_t *
static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1232,7 +1230,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
szind_t szind = sz_size2index(bin_info->reg_size);
bool zero = false;
bool commit = true;
extent_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit);
if (slab == NULL && arena_may_have_muzzy(arena)) {
slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
@@ -1246,22 +1244,22 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
return NULL;
}
}
assert(extent_slab_get(slab));
assert(edata_slab_get(slab));
/* Initialize slab internals. */
slab_data_t *slab_data = extent_slab_data_get(slab);
extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
slab_data_t *slab_data = edata_slab_data_get(slab);
edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
arena_nactive_add(arena, edata_size_get(slab) >> LG_PAGE);
return slab;
}
static extent_t *
static edata_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, unsigned binshard) {
extent_t *slab;
edata_t *slab;
const bin_info_t *bin_info;
/* Look for a usable slab. */
@@ -1307,14 +1305,14 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
if (bin->slabcur != NULL) {
/* Only attempted when current slab is full. */
assert(extent_nfree_get(bin->slabcur) == 0);
assert(edata_nfree_get(bin->slabcur) == 0);
}
const bin_info_t *bin_info = &bin_infos[binind];
extent_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
edata_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
binshard);
if (bin->slabcur != NULL) {
if (extent_nfree_get(bin->slabcur) > 0) {
if (edata_nfree_get(bin->slabcur) > 0) {
/*
* Another thread updated slabcur while this one ran
* without the bin lock in arena_bin_nonfull_slab_get().
@@ -1331,7 +1329,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
if (extent_nfree_get(slab) == bin_info->nregs) {
if (edata_nfree_get(slab) == bin_info->nregs) {
arena_dalloc_bin_slab(tsdn, arena, slab,
bin);
} else {
@@ -1350,7 +1348,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
return NULL;
}
bin->slabcur = slab;
assert(extent_nfree_get(bin->slabcur) > 0);
assert(edata_nfree_get(bin->slabcur) > 0);
return arena_slab_reg_alloc(slab, bin_info);
}
@@ -1386,12 +1384,12 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
void **empty_position = cache_bin_empty_position_get(tbin, binind);
for (i = 0, nfill = (cache_bin_ncached_max_get(binind) >>
tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
extent_t *slab;
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
edata_t *slab;
if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) >
0) {
unsigned tofill = nfill - i;
cnt = tofill < extent_nfree_get(slab) ?
tofill : extent_nfree_get(slab);
cnt = tofill < edata_nfree_get(slab) ?
tofill : edata_nfree_get(slab);
arena_slab_reg_alloc_batch(
slab, &bin_infos[binind], cnt,
empty_position - nfill + i);
@@ -1454,14 +1452,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
bin_t *bin;
size_t usize;
extent_t *slab;
edata_t *slab;
assert(binind < SC_NBINS);
usize = sz_index2size(binind);
unsigned binshard;
bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) > 0) {
ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
} else {
ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
@@ -1554,11 +1552,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
szind, false);
@@ -1568,11 +1566,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
}
static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
extent_szind_set(extent, SC_NBINS);
edata_szind_set(edata, SC_NBINS);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
@@ -1589,9 +1587,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof);
assert(opt_prof);
extent_t *extent = iealloc(tsdn, ptr);
size_t usize = extent_usize_get(extent);
size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
edata_t *edata = iealloc(tsdn, ptr);
size_t usize = edata_usize_get(edata);
size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/*
* Currently, we only do redzoning for small sampled
@@ -1604,17 +1602,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {
large_dalloc(tsdn, extent);
large_dalloc(tsdn, edata);
}
}
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
szind_t binind = extent_szind_get(slab);
szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
/*
@@ -1631,7 +1629,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(slab != bin->slabcur);
@@ -1646,9 +1644,9 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(extent_nfree_get(slab) > 0);
assert(edata_nfree_get(slab) > 0);
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
@@ -1656,9 +1654,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
if (extent_nfree_get(bin->slabcur) > 0) {
if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@@ -1674,8 +1672,8 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *slab, void *ptr, bool junked) {
slab_data_t *slab_data = extent_slab_data_get(slab);
szind_t binind, edata_t *slab, void *ptr, bool junked) {
slab_data_t *slab_data = edata_slab_data_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
@@ -1683,7 +1681,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
}
arena_slab_reg_dalloc(slab, slab_data, ptr);
unsigned nfree = extent_nfree_get(slab);
unsigned nfree = edata_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dissociate_bin_slab(arena, slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
@@ -1700,29 +1698,29 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr) {
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
szind_t binind, edata_t *edata, void *ptr) {
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
true);
}
static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
szind_t binind = extent_szind_get(extent);
unsigned binshard = extent_binshard_get(extent);
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
szind_t binind = edata_szind_get(edata);
unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[binind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
extent_t *extent = iealloc(tsdn, ptr);
arena_t *arena = arena_get_from_extent(extent);
edata_t *edata = iealloc(tsdn, ptr);
arena_t *arena = arena_get_from_edata(edata);
arena_dalloc_bin(tsdn, arena, extent, ptr);
arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena);
}
@@ -1733,7 +1731,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
extent_t *extent = iealloc(tsdn, ptr);
edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true;
goto done;
@@ -1756,19 +1754,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done;
}
arena_t *arena = arena_get_from_extent(extent);
arena_t *arena = arena_get_from_edata(edata);
arena_decay_tick(tsdn, arena);
ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) {
ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero);
} else {
ret = true;
}
done:
assert(extent == iealloc(tsdn, ptr));
*newsize = extent_usize_get(extent);
assert(edata == iealloc(tsdn, ptr));
*newsize = edata_usize_get(edata);
return ret;
}
@@ -2006,7 +2004,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
extent_list_init(&arena->large);
edata_list_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
@@ -2055,9 +2053,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
extent_avail_new(&arena->extent_avail);
if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
edata_avail_new(&arena->edata_avail);
if (malloc_mutex_init(&arena->edata_avail_mtx, "edata_avail",
WITNESS_RANK_EDATA_AVAIL, malloc_mutex_rank_exclusive)) {
goto label_error;
}
@@ -2203,7 +2201,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
malloc_mutex_prefork(tsdn, &arena->edata_avail_mtx);
}
void
@@ -2237,7 +2235,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->edata_avail_mtx);
eset_postfork_parent(tsdn, &arena->eset_dirty);
eset_postfork_parent(tsdn, &arena->eset_muzzy);
eset_postfork_parent(tsdn, &arena->eset_retained);
@@ -2283,7 +2281,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
malloc_mutex_postfork_child(tsdn, &arena->edata_avail_mtx);
eset_postfork_child(tsdn, &arena->eset_dirty);
eset_postfork_child(tsdn, &arena->eset_muzzy);
eset_postfork_child(tsdn, &arena->eset_retained);


@@ -105,14 +105,14 @@ label_done:
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_binit(extent, addr, size, sn);
edata_binit(edata, addr, size, sn);
}
static size_t
@@ -158,7 +158,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@@ -166,34 +166,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
sz_size2index(edata_bsize_get(edata) + 1) - 1;
edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@@ -218,13 +218,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
@@ -284,7 +284,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@@ -293,7 +293,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static extent_t *
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
@@ -327,7 +327,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
return &block->edata;
}
base_t *
@@ -357,7 +357,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
base->ind = ind;
ehooks_init(&base->ehooks, extent_hooks);
@@ -371,7 +371,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]);
edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@@ -384,7 +384,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@@ -422,28 +422,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
edata = edata_heap_remove_first(&base->avail[i]);
if (edata != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
if (edata == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
if (edata == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, extent, usize, alignment);
ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
*esn = edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@@ -463,16 +463,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
CACHELINE, &esn);
if (extent == NULL) {
if (edata == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
edata_esn_set(edata, esn);
return edata;
}
void


@@ -45,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_list_init(&bin->slabs_full);
edata_heap_new(&bin->slabs_nonfull);
edata_list_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}


@@ -855,8 +855,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
&astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
accum_atomic_zu(&sdstats->astats.extent_avail,
&astats->astats.extent_avail);
accum_atomic_zu(&sdstats->astats.edata_avail,
&astats->astats.edata_avail);
}
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
@@ -2603,18 +2603,18 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
int ret;
unsigned arena_ind;
void *ptr;
extent_t *extent;
edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent == NULL)
edata = iealloc(tsd_tsdn(tsd), ptr);
if (edata == NULL)
goto label_return;
arena = arena_get_from_extent(extent);
arena = arena_get_from_edata(edata);
if (arena == NULL)
goto label_return;
@@ -2860,7 +2860,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.edata_avail,
ATOMIC_RELAXED),
size_t)
@@ -3010,7 +3010,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->extent_avail_mtx);
MUTEX_PROF_RESET(arena->edata_avail_mtx);
MUTEX_PROF_RESET(arena->eset_dirty.mtx);
MUTEX_PROF_RESET(arena->eset_muzzy.mtx);
MUTEX_PROF_RESET(arena->eset_retained.mtx);


@@ -1,6 +1,6 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
ph_gen(, edata_avail_, edata_tree_t, edata_t, ph_link,
edata_esnead_comp)
ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)


@@ -200,8 +200,8 @@ ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
tsdn_t *tsdn = tsdn_fetch();
extent_t *a = iealloc(tsdn, addr_a);
extent_t *b = iealloc(tsdn, addr_b);
edata_t *a = iealloc(tsdn, addr_a);
edata_t *b = iealloc(tsdn, addr_b);
if (extent_head_no_merge(a, b)) {
return true;
}


@@ -16,10 +16,10 @@ eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&eset->heaps[i]);
edata_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
extent_list_init(&eset->lru);
edata_list_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
eset->delay_coalesce = delay_coalesce;
@@ -63,24 +63,24 @@ eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
}
void
eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
assert(extent_state_get(extent) == eset->state);
assert(edata_state_get(edata) == eset->state);
size_t size = extent_size_get(extent);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&eset->heaps[pind])) {
if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_unset(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
extent_heap_insert(&eset->heaps[pind], extent);
edata_heap_insert(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
extent_list_append(&eset->lru, extent);
edata_list_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
@@ -94,24 +94,24 @@ eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
}
void
eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
assert(extent_state_get(extent) == eset->state);
assert(edata_state_get(edata) == eset->state);
size_t size = extent_size_get(extent);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&eset->heaps[pind], extent);
edata_heap_remove(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
if (extent_heap_empty(&eset->heaps[pind])) {
if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_set(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
extent_list_remove(&eset->lru, extent);
edata_list_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert_locked, we hold eset->mtx and so don't need atomic
@@ -128,7 +128,7 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
@@ -139,10 +139,10 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
(pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(!edata_heap_empty(&eset->heaps[i]));
edata_t *edata = edata_heap_first(&eset->heaps[i]);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
@@ -154,7 +154,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
return edata;
}
}
@@ -165,9 +165,9 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
static edata_t *
eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
extent_t *ret = NULL;
edata_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
@@ -176,8 +176,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&eset->heaps[pind]) ? NULL :
extent_heap_first(&eset->heaps[pind]);
return edata_heap_empty(&eset->heaps[pind]) ? NULL :
edata_heap_first(&eset->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
@@ -185,9 +185,9 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&eset->heaps[i]));
extent_t *extent = extent_heap_first(&eset->heaps[i]);
assert(extent_size_get(extent) >= size);
assert(!edata_heap_empty(&eset->heaps[i]));
edata_t *edata = edata_heap_first(&eset->heaps[i]);
assert(edata_size_get(edata) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large eset for much smaller sizes.
@@ -198,8 +198,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
if (ret == NULL || edata_snad_comp(edata, ret) < 0) {
ret = edata;
}
if (i == SC_NPSIZES) {
break;
@@ -210,7 +210,7 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
return ret;
}
extent_t *
edata_t *
eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
@@ -220,18 +220,18 @@ eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
return NULL;
}
extent_t *extent = eset_first_fit_locked(tsdn, eset, max_size);
edata_t *edata = eset_first_fit_locked(tsdn, eset, max_size);
if (alignment > PAGE && extent == NULL) {
if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = eset_fit_alignment(eset, esize, max_size, alignment);
edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
return extent;
return edata;
}
void

File diff suppressed because it is too large


@@ -109,7 +109,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
edata_t *gap;
cassert(have_dss);
assert(size > 0);
@@ -153,7 +153,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena_ind_get(arena),
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, arena_extent_sn_next(arena),
extent_state_active, false, true, true,
@@ -194,17 +194,17 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
extent_t extent;
edata_t edata;
ehooks_t *ehooks = arena_get_ehooks(
arena);
extent_init(&extent,
edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
true, EXTENT_NOT_HEAD);
if (extent_purge_forced_wrapper(tsdn,
arena, ehooks, &extent, 0, size)) {
arena, ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}


@@ -6,21 +6,21 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
const edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = 0;
*nregs = 1;
} else {
*nfree = extent_nfree_get(extent);
*nregs = bin_infos[extent_szind_get(extent)].nregs;
*nfree = edata_nfree_get(edata);
*nregs = bin_infos[edata_szind_get(edata)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
assert(*nfree * edata_usize_get(edata) <= *size);
}
}
@@ -31,31 +31,31 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
const edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
*nfree = extent_nfree_get(extent);
const szind_t szind = extent_szind_get(extent);
*nfree = edata_nfree_get(edata);
const szind_t szind = edata_szind_get(edata);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
assert(*nfree * edata_usize_get(edata) <= *size);
const arena_t *arena = (arena_t *)atomic_load_p(
&arenas[extent_arena_ind_get(extent)], ATOMIC_RELAXED);
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
const unsigned binshard = extent_binshard_get(extent);
const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[szind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
@@ -66,12 +66,12 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
} else {
*bin_nfree = *bin_nregs = 0;
}
extent_t *slab;
edata_t *slab;
if (bin->slabcur != NULL) {
slab = bin->slabcur;
} else {
slab = extent_heap_first(&bin->slabs_nonfull);
slab = edata_heap_first(&bin->slabs_nonfull);
}
*slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
malloc_mutex_unlock(tsdn, &bin->lock);
}


@@ -21,7 +21,7 @@ void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
extent_t *extent;
edata_t *edata;
bool is_zeroed;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
@@ -44,28 +44,28 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
/* Insert extent into large. */
/* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
extent_list_append(&arena->large, extent);
edata_list_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
if (zero) {
assert(is_zeroed);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
extent_usize_get(extent));
memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
edata_usize_get(edata));
}
arena_decay_tick(tsdn, arena);
return extent_addr_get(extent);
return edata_addr_get(edata);
}
static void
@@ -90,11 +90,11 @@ large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
large_dalloc_maybe_junk_impl;
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_t *arena = arena_get_from_extent(extent);
size_t oldusize = extent_usize_get(extent);
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata);
size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
size_t diff = edata_size_get(edata) - (usize + sz_large_pad);
assert(oldusize > usize);
@@ -104,31 +104,31 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
ehooks, extent, usize + sz_large_pad, sz_size2index(usize),
edata_t *trail = extent_split_wrapper(tsdn, arena,
ehooks, edata, usize + sz_large_pad, sz_size2index(usize),
false, diff, SC_NSIZES, false);
if (trail == NULL) {
return true;
}
if (config_fill && unlikely(opt_junk_free)) {
large_dalloc_maybe_junk(extent_addr_get(trail),
extent_size_get(trail));
large_dalloc_maybe_junk(edata_addr_get(trail),
edata_size_get(trail));
}
arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
}
arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);
return false;
}
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
arena_t *arena = arena_get_from_extent(extent);
size_t oldusize = extent_usize_get(extent);
arena_t *arena = arena_get_from_edata(edata);
size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t trailsize = usize - oldusize;
@@ -147,20 +147,20 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
*/
bool is_zeroed_trail = zero;
bool commit = true;
extent_t *trail;
edata_t *trail;
bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, ehooks,
extent_past_get(extent), trailsize, 0, CACHELINE, false,
edata_past_get(edata), trailsize, 0, CACHELINE, false,
SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true;
}
@@ -169,16 +169,16 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
}
if (extent_merge_wrapper(tsdn, arena, ehooks, extent, trail)) {
if (extent_merge_wrapper(tsdn, arena, ehooks, edata, trail)) {
extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
return true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, false);
(uintptr_t)edata_addr_get(edata), szind, false);
if (config_stats && new_mapping) {
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
@@ -194,7 +194,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
((uintptr_t)extent_addr_get(extent) + oldusize);
((uintptr_t)edata_addr_get(edata) + oldusize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
@@ -203,19 +203,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
assert(is_zeroed_trail);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
return false;
}
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
size_t oldusize = extent_usize_get(extent);
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@@ -225,16 +225,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
large_ralloc_no_move_expand(tsdn, extent, usize_min,
zero)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -244,14 +243,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, arena_get_from_extent(extent));
if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
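The control flow of large_ralloc_no_move above tries the cheapest outcomes first: grow in place to usize_max, otherwise to usize_min, keep the allocation untouched if its current size already falls in [usize_min, usize_max], and only then try to shrink; when none of that works, the caller has to move the allocation. A standalone sketch of just that decision order, with the edata/arena plumbing and decay ticks stripped out and the in-place attempts passed in as callbacks (resize_fn, try_expand and try_shrink are illustrative names, not jemalloc API):

    #include <stdbool.h>
    #include <stddef.h>

    /* Callback performing an in-place resize attempt; returns true on success. */
    typedef bool (*resize_fn)(size_t oldusize, size_t newusize);

    /* Returns false when the allocation can stay in place, true if it must move. */
    static bool
    ralloc_no_move(size_t oldusize, size_t usize_min, size_t usize_max,
        resize_fn try_expand, resize_fn try_shrink) {
        if (usize_max > oldusize) {
            /* Attempt to expand the allocation in place, largest size first. */
            if (try_expand(oldusize, usize_max)) {
                return false;
            }
            /* Try again, this time with usize_min. */
            if (usize_min < usize_max && usize_min > oldusize &&
                try_expand(oldusize, usize_min)) {
                return false;
            }
        }
        /* Current size already lies in [usize_min, usize_max]: nothing to do. */
        if (oldusize >= usize_min && oldusize <= usize_max) {
            return false;
        }
        /* Attempt to shrink the allocation in place. */
        if (oldusize > usize_max) {
            return !try_shrink(oldusize, usize_max);
        }
        /* Growing in place failed above; the caller has to move. */
        return true;
    }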
@@ -271,9 +270,9 @@ void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
extent_t *extent = iealloc(tsdn, ptr);
edata_t *edata = iealloc(tsdn, ptr);
size_t oldusize = extent_usize_get(extent);
size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
@@ -281,11 +280,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
return extent_addr_get(extent);
return edata_addr_get(edata);
}
/*
@@ -306,8 +305,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
memcpy(ret, edata_addr_get(edata), copysize);
isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
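Every hunk in this file follows the same mechanical pattern: extent_t becomes edata_t and each extent_* accessor becomes the corresponding edata_* accessor, with call sites otherwise unchanged. A toy stand-in (not jemalloc's real edata_t, whose definition lives in the internal headers) showing the accessor style the rename preserves:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-in for the renamed extent metadata type. */
    typedef struct edata_s {
        void   *addr;   /* base address of the extent */
        size_t  size;   /* extent size */
        size_t  usize;  /* usable size handed back to the application */
    } edata_t;

    static inline void *
    edata_addr_get(const edata_t *edata) {
        return edata->addr;
    }

    static inline size_t
    edata_usize_get(const edata_t *edata) {
        return edata->usize;
    }

    /* First byte past the extent: where the expand path above grafts a trail. */
    static inline void *
    edata_past_get(const edata_t *edata) {
        return (void *)((uintptr_t)edata->addr + edata->size);
    }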
@@ -316,76 +315,75 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
* whether the arena's large_mtx is currently held.
*/
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool junked_locked) {
if (!junked_locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
extent_list_remove(&arena->large, extent);
edata_list_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
large_dalloc_maybe_junk(extent_addr_get(extent),
extent_usize_get(extent));
large_dalloc_maybe_junk(edata_addr_get(edata),
edata_usize_get(edata));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
extent_list_remove(&arena->large, extent);
edata_list_remove(&arena->large, edata);
}
}
arena_extent_dalloc_large_prep(tsdn, arena, extent);
arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
ehooks_t *ehooks = arena_get_ehooks(arena);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, extent);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata);
}
void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_prep_impl(tsdn, arena_get_from_extent(extent), extent,
true);
large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_finish_impl(tsdn, arena_get_from_extent(extent), extent);
large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
arena_t *arena = arena_get_from_extent(extent);
large_dalloc_prep_impl(tsdn, arena, extent, false);
large_dalloc_finish_impl(tsdn, arena, extent);
large_dalloc(tsdn_t *tsdn, edata_t *edata) {
arena_t *arena = arena_get_from_edata(edata);
large_dalloc_prep_impl(tsdn, arena, edata, false);
large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
return extent_usize_get(extent);
large_salloc(tsdn_t *tsdn, const edata_t *edata) {
return edata_usize_get(edata);
}
void
large_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
extent_prof_info_get(extent, prof_info);
large_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
edata_prof_info_get(edata, prof_info);
}
static void
large_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
extent_prof_tctx_set(extent, tctx);
large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
edata_prof_tctx_set(edata, tctx);
}
void
large_prof_tctx_reset(extent_t *extent) {
large_prof_tctx_set(extent, (prof_tctx_t *)(uintptr_t)1U);
large_prof_tctx_reset(edata_t *edata) {
large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
large_prof_info_set(extent_t *extent, prof_tctx_t *tctx) {
large_prof_tctx_set(extent, tctx);
large_prof_info_set(edata_t *edata, prof_tctx_t *tctx) {
large_prof_tctx_set(edata, tctx);
nstime_t t;
nstime_init_update(&t);
extent_prof_alloc_time_set(extent, &t);
edata_prof_alloc_time_set(edata, &t);
}
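large_dalloc and its helpers above split deallocation into two phases: large_dalloc_prep_* unlinks the edata from the arena's large list (and junk-fills) while large_mtx is, or can be, held, and large_dalloc_finish hands the pages back to the dirty extents with no list lock held. The tcache flush path below relies on that split, running all the preps in one pass and the finishes in a later one. A minimal standalone sketch of the prep/finish pattern under a pthread mutex; obj_t, the intrusive list and flush_all() are illustrative stand-ins, not jemalloc code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative large-allocation record kept on an intrusive list. */
    typedef struct obj_s {
        struct obj_s *prev;
        struct obj_s *next;
    } obj_t;

    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
    static obj_t *list_head;

    static void
    list_remove(obj_t *obj) {
        if (obj->prev != NULL) {
            obj->prev->next = obj->next;
        } else {
            list_head = obj->next;
        }
        if (obj->next != NULL) {
            obj->next->prev = obj->prev;
        }
    }

    /* Phase 1: bookkeeping that needs (or already holds) the list lock. */
    static void
    dalloc_prep(obj_t *obj, bool locked) {
        if (!locked) {
            pthread_mutex_lock(&list_mtx);
        }
        list_remove(obj);
        if (!locked) {
            pthread_mutex_unlock(&list_mtx);
        }
    }

    /* Phase 2: the expensive part, done with no list lock held. */
    static void
    dalloc_finish(obj_t *obj) {
        (void)obj; /* e.g. hand the pages back to the page allocator */
    }

    /* Batch teardown: one lock acquisition for all preps, then the finishes. */
    static void
    flush_all(obj_t **objs, size_t n) {
        pthread_mutex_lock(&list_mtx);
        for (size_t i = 0; i < n; i++) {
            dalloc_prep(objs[i], true);
        }
        pthread_mutex_unlock(&list_mtx);
        for (size_t i = 0; i < n; i++) {
            dalloc_finish(objs[i]);
        }
    }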

View File

@@ -114,8 +114,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
/* Enabled with --enable-extra-size-check. */
static void
tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
size_t nflush, extent_t **extents){
tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
size_t nflush, edata_t **edatas){
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -129,9 +129,9 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
size_t sz_sum = binind * nflush;
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
for (unsigned i = 0 ; i < nflush; i++) {
rtree_extent_szind_read(tsdn, &extents_rtree,
rtree_edata_szind_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)*(bottom_item - i), true,
&extents[i], &szind);
&edatas[i], &szind);
sz_sum -= szind;
}
if (sz_sum != 0) {
@@ -154,26 +154,26 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
arena_t *arena = tcache->arena;
assert(arena != NULL);
unsigned nflush = ncached - rem;
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
VARIABLE_ARRAY(edata_t *, item_edata, nflush);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
/* Look up extent once per item. */
/* Look up edata once per item. */
if (config_opt_safety_checks) {
tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
nflush, item_extent);
tbin_edatas_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
nflush, item_edata);
} else {
for (unsigned i = 0 ; i < nflush; i++) {
item_extent[i] = iealloc(tsd_tsdn(tsd),
item_edata[i] = iealloc(tsd_tsdn(tsd),
*(bottom_item - i));
}
}
while (nflush > 0) {
/* Lock the arena bin associated with the first object. */
extent_t *extent = item_extent[0];
unsigned bin_arena_ind = extent_arena_ind_get(extent);
edata_t *edata = item_edata[0];
unsigned bin_arena_ind = edata_arena_ind_get(edata);
arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
false);
unsigned binshard = extent_binshard_get(extent);
unsigned binshard = edata_binshard_get(edata);
assert(binshard < bin_infos[binind].n_shards);
bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
@@ -187,13 +187,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
unsigned ndeferred = 0;
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
extent = item_extent[i];
assert(ptr != NULL && extent != NULL);
edata = item_edata[i];
assert(ptr != NULL && edata != NULL);
if (extent_arena_ind_get(extent) == bin_arena_ind
&& extent_binshard_get(extent) == binshard) {
if (edata_arena_ind_get(edata) == bin_arena_ind
&& edata_binshard_get(edata) == binshard) {
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
bin_arena, bin, binind, extent, ptr);
bin_arena, bin, binind, edata, ptr);
} else {
/*
* This object was allocated via a different
@@ -202,7 +202,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
* handled in a future pass.
*/
*(bottom_item - ndeferred) = ptr;
item_extent[ndeferred] = extent;
item_edata[ndeferred] = edata;
ndeferred++;
}
}
@@ -244,22 +244,22 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
arena_t *tcache_arena = tcache->arena;
assert(tcache_arena != NULL);
unsigned nflush = ncached - rem;
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
VARIABLE_ARRAY(edata_t *, item_edata, nflush);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
#ifndef JEMALLOC_EXTRA_SIZE_CHECK
/* Look up extent once per item. */
/* Look up edata once per item. */
for (unsigned i = 0 ; i < nflush; i++) {
item_extent[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
item_edata[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
}
#else
tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
item_extent);
item_edata);
#endif
while (nflush > 0) {
/* Lock the arena associated with the first object. */
extent_t *extent = item_extent[0];
unsigned locked_arena_ind = extent_arena_ind_get(extent);
edata_t *edata = item_edata[0];
unsigned locked_arena_ind = edata_arena_ind_get(edata);
arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
locked_arena_ind, false);
@@ -270,10 +270,10 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
assert(ptr != NULL);
extent = item_extent[i];
if (extent_arena_ind_get(extent) == locked_arena_ind) {
edata = item_edata[i];
if (edata_arena_ind_get(edata) == locked_arena_ind) {
large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
extent);
edata);
}
}
if ((config_prof || config_stats) &&
@@ -293,11 +293,11 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
unsigned ndeferred = 0;
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
extent = item_extent[i];
assert(ptr != NULL && extent != NULL);
edata = item_edata[i];
assert(ptr != NULL && edata != NULL);
if (extent_arena_ind_get(extent) == locked_arena_ind) {
large_dalloc_finish(tsd_tsdn(tsd), extent);
if (edata_arena_ind_get(edata) == locked_arena_ind) {
large_dalloc_finish(tsd_tsdn(tsd), edata);
} else {
/*
* This object was allocated via a different
@@ -306,7 +306,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
* in a future pass.
*/
*(bottom_item - ndeferred) = ptr;
item_extent[ndeferred] = extent;
item_edata[ndeferred] = edata;
ndeferred++;
}
}
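Both flush loops above use the same defer-and-compact scheme: lock the arena (or bin shard) owning the first remaining object, retire every object in the batch that belongs to it, and copy the rest back to the front of the stack so a later pass, under a different lock, can handle them. nflush shrinks on every pass, so the loop needs only one lock acquisition per pass and terminates once every owner has had its turn. A self-contained sketch of that scheme; item_t, dalloc_locked() and flush_batch() are illustrative stand-ins rather than jemalloc's cache_bin machinery:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct {
        unsigned arena_ind; /* owning arena (edata_arena_ind_get analogue) */
        bool freed;
    } item_t;

    /* Stand-in for the per-arena deallocation done while its lock is held. */
    static void
    dalloc_locked(item_t *item) {
        item->freed = true;
    }

    /*
     * Flush a batch: each pass locks the arena owning the first remaining item,
     * frees everything that belongs to it, and compacts the rest to the front
     * so a later pass (under a different lock) can handle them.
     */
    static void
    flush_batch(item_t **items, unsigned nflush) {
        while (nflush > 0) {
            unsigned locked_ind = items[0]->arena_ind;
            /* lock(arena[locked_ind]) would go here. */
            unsigned ndeferred = 0;
            for (unsigned i = 0; i < nflush; i++) {
                item_t *item = items[i];
                assert(item != NULL);
                if (item->arena_ind == locked_ind) {
                    dalloc_locked(item);
                } else {
                    /* Different arena: stash it for a future pass. */
                    items[ndeferred++] = item;
                }
            }
            /* unlock(arena[locked_ind]) would go here. */
            nflush = ndeferred;
        }
    }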