Remove extent knowledge of arena.

commit eba35e2e48 (parent e77f47a85a)
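The change below is mechanical throughout: the ecache and extent entry points stop taking an arena_t * and instead take the pa_shard_t * embedded in the arena, so the extent code only touches shard-local state (edata_cache, the dirty/muzzy/retained ecaches, ecache_grow, stats) and arena code passes &arena->pa_shard at each call site. A minimal before/after sketch of the calling convention, modeled on a call site that appears in the diff (not a complete excerpt):

/* Before: extent code received the whole arena. */
edata_t *edata = ecache_alloc(tsdn, arena, ehooks,
    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind, zero);

/* After: only the page-allocator shard crosses the boundary. */
edata_t *edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind, zero);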
@@ -19,34 +19,34 @@
 #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
 extern size_t opt_lg_extent_max_active_fit;

-edata_t *ecache_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *ecache_alloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, bool *zero);
-edata_t *ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *ecache_alloc_grow(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, bool *zero);
-void ecache_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+void ecache_dalloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, edata_t *edata);
-edata_t *ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, size_t npages_min);

-edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 void *new_addr, size_t size, size_t alignment, bool slab, szind_t szind,
 bool *zero, bool *commit);
-void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+void extent_dalloc_gap(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata);
-void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+void extent_dalloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata);
-void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+void extent_destroy_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata);
 bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 size_t offset, size_t length);
 bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 size_t offset, size_t length);
-bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length);
+bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length);
-bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length);
+bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length);
 edata_t *extent_split_wrapper(tsdn_t *tsdn, edata_cache_t *edata_cache,
 ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
 bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
src/arena.c (31 lines changed)
@@ -282,8 +282,8 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, 0);

-ecache_dalloc(tsdn, arena, ehooks, &arena->pa_shard.ecache_dirty,
-edata);
+ecache_dalloc(tsdn, &arena->pa_shard, ehooks,
+&arena->pa_shard.ecache_dirty, edata);
 if (arena_dirty_decay_ms_get(arena) == 0) {
 arena_decay_dirty(tsdn, arena, false, true);
 } else {
@@ -459,16 +459,16 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 szind_t szind = sz_size2index(usize);
 size_t mapped_add;
 size_t esize = usize + sz_large_pad;
-edata_t *edata = ecache_alloc(tsdn, arena, ehooks,
+edata_t *edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind,
 zero);
 if (edata == NULL && arena_may_have_muzzy(arena)) {
-edata = ecache_alloc(tsdn, arena, ehooks,
+edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_muzzy, NULL, esize, alignment,
 false, szind, zero);
 }
 if (edata == NULL) {
-edata = ecache_alloc_grow(tsdn, arena, ehooks,
+edata = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_retained, NULL, esize, alignment,
 false, szind, zero);
 if (config_stats) {
@@ -655,7 +655,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
 size_t nstashed = 0;
 edata_t *edata;
 while (nstashed < npages_decay_max &&
-(edata = ecache_evict(tsdn, arena, ehooks, ecache, npages_limit))
+(edata = ecache_evict(tsdn, &arena->pa_shard, ehooks, ecache, npages_limit))
 != NULL) {
 edata_list_append(decay_extents, edata);
 nstashed += edata_size_get(edata) >> LG_PAGE;
@@ -690,9 +690,9 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 not_reached();
 case extent_state_dirty:
 if (!all && muzzy_decay_ms != 0 &&
-!extent_purge_lazy_wrapper(tsdn, arena,
-ehooks, edata, 0, edata_size_get(edata))) {
-ecache_dalloc(tsdn, arena, ehooks,
+!extent_purge_lazy_wrapper(tsdn, ehooks, edata, 0,
+edata_size_get(edata))) {
+ecache_dalloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_muzzy, edata);
 arena_background_thread_inactivity_check(tsdn,
 arena, is_background_thread);
@@ -700,7 +700,8 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }
 JEMALLOC_FALLTHROUGH;
 case extent_state_muzzy:
-extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
+extent_dalloc_wrapper(tsdn, &arena->pa_shard, ehooks,
+edata);
 if (config_stats) {
 nunmapped += npages;
 }
@@ -988,9 +989,9 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 */
 ehooks_t *ehooks = arena_get_ehooks(arena);
 edata_t *edata;
-while ((edata = ecache_evict(tsdn, arena, ehooks,
+while ((edata = ecache_evict(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_retained, 0)) != NULL) {
-extent_destroy_wrapper(tsdn, arena, ehooks, edata);
+extent_destroy_wrapper(tsdn, &arena->pa_shard, ehooks, edata);
 }
 }

@@ -1040,7 +1041,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 WITNESS_RANK_CORE, 0);

 zero = false;
-slab = ecache_alloc_grow(tsdn, arena, ehooks,
+slab = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_retained, NULL, bin_info->slab_size, PAGE,
 true, szind, &zero);

@@ -1061,11 +1062,11 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 ehooks_t *ehooks = arena_get_ehooks(arena);
 szind_t szind = sz_size2index(bin_info->reg_size);
 bool zero = false;
-edata_t *slab = ecache_alloc(tsdn, arena, ehooks,
+edata_t *slab = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_dirty, NULL, bin_info->slab_size, PAGE,
 true, binind, &zero);
 if (slab == NULL && arena_may_have_muzzy(arena)) {
-slab = ecache_alloc(tsdn, arena, ehooks,
+slab = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_muzzy, NULL, bin_info->slab_size,
 PAGE, true, binind, &zero);
 }
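The arena call sites above keep their allocation fallback order; only the argument passed to the extent layer changes. As a hedged paraphrase of the order arena_extent_alloc_large() and arena_slab_alloc() follow (the wrapper function name is invented for illustration, everything else is taken from the hunks above):

/* Hypothetical helper paraphrasing the fallback order in src/arena.c. */
static edata_t *
arena_alloc_extent_with_fallback(tsdn_t *tsdn, arena_t *arena,
    ehooks_t *ehooks, size_t esize, size_t alignment, szind_t szind,
    bool *zero) {
	pa_shard_t *shard = &arena->pa_shard;
	/* 1. Reuse a dirty (recently freed, unpurged) extent if one fits. */
	edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
	    &shard->ecache_dirty, NULL, esize, alignment, false, szind, zero);
	/* 2. Fall back to muzzy (lazily purged) extents. */
	if (edata == NULL && arena_may_have_muzzy(arena)) {
		edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
		    NULL, esize, alignment, false, szind, zero);
	}
	/* 3. Finally recycle retained VM, growing the mapping if needed. */
	if (edata == NULL) {
		edata = ecache_alloc_grow(tsdn, shard, ehooks,
		    &shard->ecache_retained, NULL, esize, alignment, false,
		    szind, zero);
	}
	return edata;
}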
src/extent.c (286 lines changed)
@@ -15,12 +15,10 @@ size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

 static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 size_t offset, size_t length, bool growing_retained);
-static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
-ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length,
-bool growing_retained);
-static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
-ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length,
-bool growing_retained);
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+edata_t *edata, size_t offset, size_t length, bool growing_retained);
+static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+edata_t *edata, size_t offset, size_t length, bool growing_retained);
 static edata_t *extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
 bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
@@ -39,15 +37,16 @@ static atomic_zu_t highpages;
 */

 static void extent_deregister(tsdn_t *tsdn, edata_t *edata);
-static edata_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-ecache_t *ecache, void *new_addr, size_t usize, size_t alignment, bool slab,
-szind_t szind, bool *zero, bool *commit, bool growing_retained);
+static edata_t *extent_recycle(tsdn_t *tsdn, pa_shard_t *shard,
+ehooks_t *ehooks, ecache_t *ecache, void *new_addr, size_t usize,
+size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
+bool growing_retained);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
 ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
 bool growing_retained);
-static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+static void extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, edata_t *edata, bool growing_retained);
-static edata_t *extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+static edata_t *extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard,
 ehooks_t *ehooks, void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, bool *zero, bool *commit);

@@ -70,23 +69,7 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
 }

 edata_t *
-ecache_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
-void *new_addr, size_t size, size_t alignment, bool slab, szind_t szind,
-bool *zero) {
-assert(size != 0);
-assert(alignment != 0);
-witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-WITNESS_RANK_CORE, 0);
-
-bool commit = true;
-edata_t *edata = extent_recycle(tsdn, arena, ehooks, ecache, new_addr,
-size, alignment, slab, szind, zero, &commit, false);
-assert(edata == NULL || edata_dumpable_get(edata));
-return edata;
-}
-
-edata_t *
-ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+ecache_alloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, bool *zero) {
 assert(size != 0);
@@ -95,7 +78,23 @@ ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 WITNESS_RANK_CORE, 0);

 bool commit = true;
-edata_t *edata = extent_alloc_retained(tsdn, arena, ehooks, new_addr,
+edata_t *edata = extent_recycle(tsdn, shard, ehooks, ecache,
+new_addr, size, alignment, slab, szind, zero, &commit, false);
+assert(edata == NULL || edata_dumpable_get(edata));
+return edata;
+}
+
+edata_t *
+ecache_alloc_grow(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
+szind_t szind, bool *zero) {
+assert(size != 0);
+assert(alignment != 0);
+witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+WITNESS_RANK_CORE, 0);
+
+bool commit = true;
+edata_t *edata = extent_alloc_retained(tsdn, shard, ehooks, new_addr,
 size, alignment, slab, szind, zero, &commit);
 if (edata == NULL) {
 if (opt_retain && new_addr != NULL) {
@@ -107,7 +106,7 @@ ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 */
 return NULL;
 }
-edata = extent_alloc_wrapper(tsdn, arena, ehooks, new_addr,
+edata = extent_alloc_wrapper(tsdn, shard, ehooks, new_addr,
 size, alignment, slab, szind, zero, &commit);
 }

@@ -116,8 +115,8 @@ ecache_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }

 void
-ecache_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
-edata_t *edata) {
+ecache_dalloc(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+ecache_t *ecache, edata_t *edata) {
 assert(edata_base_get(edata) != NULL);
 assert(edata_size_get(edata) != 0);
 assert(edata_dumpable_get(edata));
@@ -127,12 +126,12 @@ ecache_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 edata_addr_set(edata, edata_base_get(edata));
 edata_zeroed_set(edata, false);

-extent_record(tsdn, arena, ehooks, ecache, edata, false);
+extent_record(tsdn, shard, ehooks, ecache, edata, false);
 }

 edata_t *
-ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
-size_t npages_min) {
+ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+ecache_t *ecache, size_t npages_min) {
 malloc_mutex_lock(tsdn, &ecache->mtx);

 /*
@@ -157,8 +156,8 @@ ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 break;
 }
 /* Try to coalesce. */
-if (extent_try_delayed_coalesce(tsdn,
-&arena->pa_shard.edata_cache, ehooks, ecache, edata)) {
+if (extent_try_delayed_coalesce(tsdn, &shard->edata_cache,
+ehooks, ecache, edata)) {
 break;
 }
 /*
@@ -195,11 +194,11 @@ label_return:
 * indicates OOM), e.g. when trying to split an existing extent.
 */
 static void
-extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extents_abandon_vm(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, edata_t *edata, bool growing_retained) {
 size_t sz = edata_size_get(edata);
 if (config_stats) {
-atomic_fetch_add_zu(&arena->pa_shard.stats->abandoned_vm, sz,
+atomic_fetch_add_zu(&shard->stats->abandoned_vm, sz,
 ATOMIC_RELAXED);
 }
 /*
@@ -207,13 +206,13 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * that this is only a virtual memory leak.
 */
 if (ecache->state == extent_state_dirty) {
-if (extent_purge_lazy_impl(tsdn, arena, ehooks, edata, 0, sz,
+if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
 growing_retained)) {
-extent_purge_forced_impl(tsdn, arena, ehooks, edata, 0,
+extent_purge_forced_impl(tsdn, ehooks, edata, 0,
 edata_size_get(edata), growing_retained);
 }
 }
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 }

 static void
@@ -356,7 +355,7 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, edata_t *edata) {
 * given allocation request.
 */
 static edata_t *
-extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_recycle_extract(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
 bool growing_retained) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -440,7 +439,7 @@ typedef enum {
 } extent_split_interior_result_t;

 static extent_split_interior_result_t
-extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 /* The result of splitting, in case of success. */
 edata_t **edata, edata_t **lead, edata_t **trail,
 /* The mess to clean up, in case of error. */
@@ -463,9 +462,9 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 /* Split the lead. */
 if (leadsize != 0) {
 *lead = *edata;
-*edata = extent_split_impl(tsdn, &arena->pa_shard.edata_cache,
-ehooks, *lead, leadsize, SC_NSIZES, false, size + trailsize,
-szind, slab, growing_retained);
+*edata = extent_split_impl(tsdn, &shard->edata_cache, ehooks,
+*lead, leadsize, SC_NSIZES, false, size + trailsize, szind,
+slab, growing_retained);
 if (*edata == NULL) {
 *to_leak = *lead;
 *lead = NULL;
@@ -475,9 +474,9 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,

 /* Split the trail. */
 if (trailsize != 0) {
-*trail = extent_split_impl(tsdn, &arena->pa_shard.edata_cache,
-ehooks, *edata, size, szind, slab, trailsize, SC_NSIZES,
-false, growing_retained);
+*trail = extent_split_impl(tsdn, &shard->edata_cache, ehooks,
+*edata, size, szind, slab, trailsize, SC_NSIZES, false,
+growing_retained);
 if (*trail == NULL) {
 *to_leak = *edata;
 *to_salvage = *lead;
@@ -501,7 +500,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * and put back into ecache.
 */
 static edata_t *
-extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_recycle_split(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, edata_t *edata, bool growing_retained) {
 edata_t *lead;
@@ -510,7 +509,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);

 extent_split_interior_result_t result = extent_split_interior(
-tsdn, arena, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
+tsdn, shard, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
 new_addr, size, alignment, slab, szind, growing_retained);

 if (!maps_coalesce && result != extent_split_interior_ok
@@ -544,7 +543,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 if (to_leak != NULL) {
 void *leak = edata_base_get(to_leak);
 extent_deregister_no_gdump_sub(tsdn, to_leak);
-extents_abandon_vm(tsdn, arena, ehooks, ecache, to_leak,
+extents_abandon_vm(tsdn, shard, ehooks, ecache, to_leak,
 growing_retained);
 assert(emap_lock_edata_from_addr(tsdn, &emap_global,
 leak, false) == NULL);
@@ -559,21 +558,21 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * in the given ecache_t.
 */
 static edata_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
-void *new_addr, size_t size, size_t alignment, bool slab, szind_t szind,
-bool *zero, bool *commit, bool growing_retained) {
+extent_recycle(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool slab,
+szind_t szind, bool *zero, bool *commit, bool growing_retained) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 assert(new_addr == NULL || !slab);
 assert(!*zero || !slab);

-edata_t *edata = extent_recycle_extract(tsdn, arena, ehooks, ecache,
+edata_t *edata = extent_recycle_extract(tsdn, shard, ehooks, ecache,
 new_addr, size, alignment, slab, growing_retained);
 if (edata == NULL) {
 return NULL;
 }

-edata = extent_recycle_split(tsdn, arena, ehooks, ecache, new_addr,
+edata = extent_recycle_split(tsdn, shard, ehooks, ecache, new_addr,
 size, alignment, slab, szind, edata, growing_retained);
 if (edata == NULL) {
 return NULL;
@@ -582,7 +581,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 if (*commit && !edata_committed_get(edata)) {
 if (extent_commit_impl(tsdn, ehooks, edata, 0,
 edata_size_get(edata), growing_retained)) {
-extent_record(tsdn, arena, ehooks, ecache, edata,
+extent_record(tsdn, shard, ehooks, ecache, edata,
 growing_retained);
 return NULL;
 }
@@ -614,13 +613,13 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 /*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
-* virtual memory ranges retained by each arena.
+* virtual memory ranges retained by each shard.
 */
 static edata_t *
-extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 size_t size, size_t alignment, bool slab, szind_t szind,
 bool *zero, bool *commit) {
-malloc_mutex_assert_owner(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_assert_owner(tsdn, &shard->ecache_grow.mtx);
 assert(!*zero || !slab);

 size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
@@ -633,20 +632,19 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * satisfy this request.
 */
 pszind_t egn_skip = 0;
-size_t alloc_size = sz_pind2sz(
-arena->pa_shard.ecache_grow.next + egn_skip);
+size_t alloc_size = sz_pind2sz(shard->ecache_grow.next + egn_skip);
 while (alloc_size < alloc_size_min) {
 egn_skip++;
-if (arena->pa_shard.ecache_grow.next + egn_skip >=
+if (shard->ecache_grow.next + egn_skip >=
 sz_psz2ind(SC_LARGE_MAXCLASS)) {
 /* Outside legal range. */
 goto label_err;
 }
 alloc_size = sz_pind2sz(
-arena->pa_shard.ecache_grow.next + egn_skip);
+shard->ecache_grow.next + egn_skip);
 }

-edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
+edata_t *edata = edata_cache_get(tsdn, &shard->edata_cache);
 if (edata == NULL) {
 goto label_err;
 }
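The size selection in extent_grow_retained() above is the geometric growth policy: ecache_grow.next is a page-size-class index, each retained growth picks the first class at or above the request, and a later hunk advances next so successive growths come from larger mappings. A condensed sketch of just that selection, using the names from the diff inside a hypothetical helper (the real code inlines this and jumps to label_err on failure):

/* Hypothetical helper; returns 0 if no legal size class can satisfy the request. */
static size_t
grow_alloc_size(pa_shard_t *shard, size_t alloc_size_min, pszind_t *egn_skip) {
	*egn_skip = 0;
	size_t alloc_size = sz_pind2sz(shard->ecache_grow.next + *egn_skip);
	while (alloc_size < alloc_size_min) {
		(*egn_skip)++;
		if (shard->ecache_grow.next + *egn_skip >=
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
			return 0;	/* Outside the legal size-class range. */
		}
		alloc_size = sz_pind2sz(shard->ecache_grow.next + *egn_skip);
	}
	return alloc_size;
}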
@@ -657,17 +655,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 &committed);

 if (ptr == NULL) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 goto label_err;
 }

-edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_retained), ptr,
-alloc_size, false, SC_NSIZES,
-pa_shard_extent_sn_next(&arena->pa_shard), extent_state_active,
-zeroed, committed, true, EXTENT_IS_HEAD);
+edata_init(edata, ecache_ind_get(&shard->ecache_retained), ptr,
+alloc_size, false, SC_NSIZES, pa_shard_extent_sn_next(shard),
+extent_state_active, zeroed, committed, true, EXTENT_IS_HEAD);

 if (extent_register_no_gdump_add(tsdn, edata)) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 goto label_err;
 }

@@ -684,17 +681,17 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);

 extent_split_interior_result_t result = extent_split_interior(tsdn,
-arena, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
+shard, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
 size, alignment, slab, szind, true);

 if (result == extent_split_interior_ok) {
 if (lead != NULL) {
-extent_record(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, lead, true);
+extent_record(tsdn, shard, ehooks,
+&shard->ecache_retained, lead, true);
 }
 if (trail != NULL) {
-extent_record(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, trail, true);
+extent_record(tsdn, shard, ehooks,
+&shard->ecache_retained, trail, true);
 }
 } else {
 /*
@@ -706,13 +703,13 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 if (config_prof) {
 extent_gdump_add(tsdn, to_salvage);
 }
-extent_record(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, to_salvage, true);
+extent_record(tsdn, shard, ehooks,
+&shard->ecache_retained, to_salvage, true);
 }
 if (to_leak != NULL) {
 extent_deregister_no_gdump_sub(tsdn, to_leak);
-extents_abandon_vm(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, to_leak, true);
+extents_abandon_vm(tsdn, shard, ehooks,
+&shard->ecache_retained, to_leak, true);
 }
 goto label_err;
 }
@@ -720,8 +717,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 if (*commit && !edata_committed_get(edata)) {
 if (extent_commit_impl(tsdn, ehooks, edata, 0,
 edata_size_get(edata), true)) {
-extent_record(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, edata, true);
+extent_record(tsdn, shard, ehooks,
+&shard->ecache_retained, edata, true);
 goto label_err;
 }
 /* A successful commit should return zeroed memory. */
@@ -739,15 +736,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * Increment extent_grow_next if doing so wouldn't exceed the allowed
 * range.
 */
-if (arena->pa_shard.ecache_grow.next + egn_skip + 1 <=
-arena->pa_shard.ecache_grow.limit) {
-arena->pa_shard.ecache_grow.next += egn_skip + 1;
+if (shard->ecache_grow.next + egn_skip + 1 <=
+shard->ecache_grow.limit) {
+shard->ecache_grow.next += egn_skip + 1;
 } else {
-arena->pa_shard.ecache_grow.next
-= arena->pa_shard.ecache_grow.limit;
+shard->ecache_grow.next = shard->ecache_grow.limit;
 }
 /* All opportunities for failure are past. */
-malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);

 if (config_prof) {
 /* Adjust gdump stats now that extent is final size. */
@@ -765,47 +761,47 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,

 return edata;
 label_err:
-malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
 return NULL;
 }

 static edata_t *
-extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 void *new_addr, size_t size, size_t alignment, bool slab, szind_t szind,
 bool *zero, bool *commit) {
 assert(size != 0);
 assert(alignment != 0);

-malloc_mutex_lock(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_lock(tsdn, &shard->ecache_grow.mtx);

-edata_t *edata = extent_recycle(tsdn, arena, ehooks,
-&arena->pa_shard.ecache_retained, new_addr, size, alignment, slab,
+edata_t *edata = extent_recycle(tsdn, shard, ehooks,
+&shard->ecache_retained, new_addr, size, alignment, slab,
 szind, zero, commit, true);
 if (edata != NULL) {
-malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
 if (config_prof) {
 extent_gdump_add(tsdn, edata);
 }
 } else if (opt_retain && new_addr == NULL) {
-edata = extent_grow_retained(tsdn, arena, ehooks, size,
+edata = extent_grow_retained(tsdn, shard, ehooks, size,
 alignment, slab, szind, zero, commit);
 /* extent_grow_retained() always releases extent_grow_mtx. */
 } else {
-malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
 }
-malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.ecache_grow.mtx);
+malloc_mutex_assert_not_owner(tsdn, &shard->ecache_grow.mtx);

 return edata;
 }

 edata_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_alloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 void *new_addr, size_t size, size_t alignment, bool slab,
 szind_t szind, bool *zero, bool *commit) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, 0);

-edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
+edata_t *edata = edata_cache_get(tsdn, &shard->edata_cache);
 if (edata == NULL) {
 return NULL;
 }
@@ -813,14 +809,14 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
 zero, commit);
 if (addr == NULL) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 return NULL;
 }
-edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_dirty), addr,
-size, slab, szind, pa_shard_extent_sn_next(&arena->pa_shard),
+edata_init(edata, ecache_ind_get(&shard->ecache_dirty), addr,
+size, slab, szind, pa_shard_extent_sn_next(shard),
 extent_state_active, *zero, *commit, true, EXTENT_NOT_HEAD);
 if (extent_register(tsdn, edata)) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 return NULL;
 }

@@ -956,24 +952,24 @@ extent_try_coalesce_large(tsdn_t *tsdn, edata_cache_t *edata_cache,

 /* Purge a single extent to retained / unmapped directly. */
 static void
-extent_maximally_purge(tsdn_t *tsdn,arena_t *arena, ehooks_t *ehooks,
+extent_maximally_purge(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata) {
 size_t extent_size = edata_size_get(edata);
-extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
+extent_dalloc_wrapper(tsdn, shard, ehooks, edata);
 if (config_stats) {
 /* Update stats accordingly. */
-LOCKEDINT_MTX_LOCK(tsdn, *arena->pa_shard.stats_mtx);
+LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
 locked_inc_u64(tsdn,
-LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
-&arena->pa_shard.stats->decay_dirty.nmadvise, 1);
+LOCKEDINT_MTX(*shard->stats_mtx),
+&shard->stats->decay_dirty.nmadvise, 1);
 locked_inc_u64(tsdn,
-LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
-&arena->pa_shard.stats->decay_dirty.purged,
+LOCKEDINT_MTX(*shard->stats_mtx),
+&shard->stats->decay_dirty.purged,
 extent_size >> LG_PAGE);
 locked_dec_zu(tsdn,
-LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
-&arena->pa_shard.stats->mapped, extent_size);
-LOCKEDINT_MTX_UNLOCK(tsdn, *arena->pa_shard.stats_mtx);
+LOCKEDINT_MTX(*shard->stats_mtx),
+&shard->stats->mapped, extent_size);
+LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
 }
 }

@@ -983,8 +979,8 @@ extent_maximally_purge(tsdn_t *tsdn,arena_t *arena, ehooks_t *ehooks,
 * given ecache_t (coalesces, deregisters slab interiors, the heap operations).
 */
 static void
-extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
-edata_t *edata, bool growing_retained) {
+extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+ecache_t *ecache, edata_t *edata, bool growing_retained) {
 assert((ecache->state != extent_state_dirty &&
 ecache->state != extent_state_muzzy) ||
 !edata_zeroed_get(edata));
@@ -1000,23 +996,23 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 emap_assert_mapped(tsdn, &emap_global, edata);

 if (!ecache->delay_coalesce) {
-edata = extent_try_coalesce(tsdn, &arena->pa_shard.edata_cache,
-ehooks, ecache, edata, NULL, growing_retained);
+edata = extent_try_coalesce(tsdn, &shard->edata_cache, ehooks,
+ecache, edata, NULL, growing_retained);
 } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
-assert(ecache == &arena->pa_shard.ecache_dirty);
+assert(ecache == &shard->ecache_dirty);
 /* Always coalesce large extents eagerly. */
 bool coalesced;
 do {
 assert(edata_state_get(edata) == extent_state_active);
 edata = extent_try_coalesce_large(tsdn,
-&arena->pa_shard.edata_cache, ehooks, ecache, edata,
+&shard->edata_cache, ehooks, ecache, edata,
 &coalesced, growing_retained);
 } while (coalesced);
 if (edata_size_get(edata) >= oversize_threshold &&
-pa_shard_may_force_decay(&arena->pa_shard)) {
+pa_shard_may_force_decay(shard)) {
 /* Shortcut to purge the oversize extent eagerly. */
 malloc_mutex_unlock(tsdn, &ecache->mtx);
-extent_maximally_purge(tsdn, arena, ehooks, edata);
+extent_maximally_purge(tsdn, shard, ehooks, edata);
 return;
 }
 }
@@ -1026,20 +1022,20 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 }

 void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_dalloc_gap(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, 0);

 if (extent_register(tsdn, edata)) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 return;
 }
-extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
+extent_dalloc_wrapper(tsdn, shard, ehooks, edata);
 }

 static bool
-extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_dalloc_wrapper_try(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata) {
 bool err;

@@ -1055,14 +1051,14 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 edata_size_get(edata), edata_committed_get(edata));

 if (!err) {
-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 }

 return err;
 }

 void
-extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_dalloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata) {
 assert(edata_dumpable_get(edata));
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1075,7 +1071,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 * threads, and reregister if deallocation fails.
 */
 extent_deregister(tsdn, edata);
-if (!extent_dalloc_wrapper_try(tsdn, arena, ehooks, edata)) {
+if (!extent_dalloc_wrapper_try(tsdn, shard, ehooks, edata)) {
 return;
 }
 extent_reregister(tsdn, edata);
@@ -1104,12 +1100,12 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 extent_gdump_sub(tsdn, edata);
 }

-extent_record(tsdn, arena, ehooks, &arena->pa_shard.ecache_retained,
-edata, false);
+extent_record(tsdn, shard, ehooks, &shard->ecache_retained, edata,
+false);
 }

 void
-extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+extent_destroy_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 edata_t *edata) {
 assert(edata_base_get(edata) != NULL);
 assert(edata_size_get(edata) != 0);
@@ -1125,7 +1121,7 @@ extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
 edata_size_get(edata), edata_committed_get(edata));

-edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
+edata_cache_put(tsdn, &shard->edata_cache, edata);
 }

 static bool
@@ -1158,8 +1154,8 @@ extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 }

 static bool
-extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length, bool growing_retained) {
+extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length, bool growing_retained) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
@@ -1168,15 +1164,15 @@ extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }

 bool
-extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length) {
-return extent_purge_lazy_impl(tsdn, arena, ehooks, edata, offset,
+extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length) {
+return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
 length, false);
 }

 static bool
-extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length, bool growing_retained) {
+extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length, bool growing_retained) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
@@ -1185,10 +1181,10 @@ extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }

 bool
-extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-edata_t *edata, size_t offset, size_t length) {
-return extent_purge_forced_impl(tsdn, arena, ehooks, edata,
-offset, length, false);
+extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+size_t offset, size_t length) {
+return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
+false);
 }

 /*
@@ -188,8 +188,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 if (gap_size_page != 0) {
 ehooks_t *ehooks = arena_get_ehooks(
 arena);
-extent_dalloc_gap(tsdn, arena, ehooks,
-gap);
+extent_dalloc_gap(tsdn,
+&arena->pa_shard, ehooks, gap);
 } else {
 edata_cache_put(tsdn,
 &arena->pa_shard.edata_cache, gap);
@@ -208,7 +208,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 extent_state_active, false, true,
 true, EXTENT_NOT_HEAD);
 if (extent_purge_forced_wrapper(tsdn,
-arena, ehooks, &edata, 0, size)) {
+ehooks, &edata, 0, size)) {
 memset(ret, 0, size);
 }
 }
@@ -120,17 +120,17 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 bool is_zeroed_trail = zero;
 edata_t *trail;
 bool new_mapping;
-if ((trail = ecache_alloc(tsdn, arena, ehooks,
+if ((trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_dirty, edata_past_get(edata), trailsize,
 CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL
-|| (trail = ecache_alloc(tsdn, arena, ehooks,
+|| (trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_muzzy, edata_past_get(edata), trailsize,
 CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL) {
 if (config_stats) {
 new_mapping = false;
 }
 } else {
-if ((trail = ecache_alloc_grow(tsdn, arena, ehooks,
+if ((trail = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
 &arena->pa_shard.ecache_retained, edata_past_get(edata),
 trailsize, CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
 == NULL) {
@@ -143,7 +143,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,

 if (extent_merge_wrapper(tsdn, ehooks, &arena->pa_shard.edata_cache,
 edata, trail)) {
-extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
+extent_dalloc_wrapper(tsdn, &arena->pa_shard, ehooks, trail);
 return true;
 }
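The hunk above is the in-place expansion path: a trail extent is requested immediately past the existing one (dirty, then muzzy, then retained) and merged into it; if the merge fails, the trail goes back through extent_dalloc_wrapper, which now also takes the shard. A condensed sketch of that flow with names taken from the diff (the wrapper function itself is hypothetical, and the muzzy/retained fallbacks are omitted):

/* Hypothetical condensation of the expand path in large_ralloc_no_move_expand(). */
static bool
expand_in_place(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, edata_t *edata,
    size_t trailsize, bool *zero_trail) {
	pa_shard_t *shard = &arena->pa_shard;
	/* Grab the address range directly after the current extent. */
	edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
	    edata_past_get(edata), trailsize, CACHELINE, false, SC_NSIZES,
	    zero_trail);
	if (trail == NULL) {
		return true;	/* Caller would fall back to muzzy/retained. */
	}
	/* Glue the trail onto the original extent. */
	if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
	    trail)) {
		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
		return true;	/* Merge failed; cannot expand in place. */
	}
	return false;	/* Success. */
}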