From a24faed56915df38c5ab67b66cefbb596c0e165c Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Sun, 8 Mar 2020 10:35:56 -0700
Subject: [PATCH] PA: Move in the ecache_t objects.

---
 include/jemalloc/internal/arena_structs.h |  12 +--
 include/jemalloc/internal/pa.h            |  13 +++
 src/arena.c                               | 112 ++++++++++++----
 src/background_thread.c                   |   8 +-
 src/ctl.c                                 |   6 +-
 src/extent.c                              |  20 ++--
 src/large.c                               |  16 ++--
 7 files changed, 102 insertions(+), 85 deletions(-)

diff --git a/include/jemalloc/internal/arena_structs.h b/include/jemalloc/internal/arena_structs.h
index fde540af..23fa424c 100644
--- a/include/jemalloc/internal/arena_structs.h
+++ b/include/jemalloc/internal/arena_structs.h
@@ -12,6 +12,7 @@
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/pa.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/smoothstep.h"
@@ -150,15 +151,8 @@ struct arena_s {
 	/* Synchronizes all large allocation/update/deallocation. */
 	malloc_mutex_t large_mtx;
 
-	/*
-	 * Collections of extents that were previously allocated. These are
-	 * used when allocating extents, in an attempt to re-use address space.
-	 *
-	 * Synchronization: internal.
-	 */
-	ecache_t ecache_dirty;
-	ecache_t ecache_muzzy;
-	ecache_t ecache_retained;
+	/* The page-level allocator shard this arena uses. */
+	pa_shard_t pa_shard;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index 5146ae1a..4e73f10f 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -6,4 +6,17 @@
  * allocations.
  */
 
+typedef struct pa_shard_s pa_shard_t;
+struct pa_shard_s {
+	/*
+	 * Collections of extents that were previously allocated. These are
+	 * used when allocating extents, in an attempt to re-use address space.
+	 *
+	 * Synchronization: internal.
+	 */
+	ecache_t ecache_dirty;
+	ecache_t ecache_muzzy;
+	ecache_t ecache_retained;
+};
+
 #endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/src/arena.c b/src/arena.c
index f6876e35..d9932b13 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -74,8 +74,8 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
-	*ndirty += ecache_npages_get(&arena->ecache_dirty);
-	*nmuzzy += ecache_npages_get(&arena->ecache_muzzy);
+	*ndirty += ecache_npages_get(&arena->pa_shard.ecache_dirty);
+	*nmuzzy += ecache_npages_get(&arena->pa_shard.ecache_muzzy);
 }
 
 void
@@ -98,7 +98,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->mapped, base_mapped +
 	    arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
 	arena_stats_accum_zu(&astats->retained,
-	    ecache_npages_get(&arena->ecache_retained) << LG_PAGE);
+	    ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
 
 	atomic_store_zu(&astats->edata_avail,
 	    atomic_load_zu(&arena->edata_cache.count, ATOMIC_RELAXED),
@@ -129,8 +129,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
 	arena_stats_accum_zu(&astats->resident, base_resident +
 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
-	    ecache_npages_get(&arena->ecache_dirty) +
-	    ecache_npages_get(&arena->ecache_muzzy)) << LG_PAGE)));
+	    ecache_npages_get(&arena->pa_shard.ecache_dirty) +
+	    ecache_npages_get(&arena->pa_shard.ecache_muzzy)) << LG_PAGE)));
 	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
 	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
 
@@ -172,12 +172,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
 		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
 		    retained_bytes;
-		dirty = ecache_nextents_get(&arena->ecache_dirty, i);
-		muzzy = ecache_nextents_get(&arena->ecache_muzzy, i);
-		retained = ecache_nextents_get(&arena->ecache_retained, i);
-		dirty_bytes = ecache_nbytes_get(&arena->ecache_dirty, i);
-		muzzy_bytes = ecache_nbytes_get(&arena->ecache_muzzy, i);
-		retained_bytes = ecache_nbytes_get(&arena->ecache_retained, i);
+		dirty = ecache_nextents_get(&arena->pa_shard.ecache_dirty, i);
+		muzzy = ecache_nextents_get(&arena->pa_shard.ecache_muzzy, i);
+		retained = ecache_nextents_get(&arena->pa_shard.ecache_retained,
+		    i);
+		dirty_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_dirty,
+		    i);
+		muzzy_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_muzzy,
+		    i);
+		retained_bytes = ecache_nbytes_get(
+		    &arena->pa_shard.ecache_retained, i);
 
 		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
 		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
@@ -226,11 +230,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
 	READ_ARENA_MUTEX_PROF_DATA(edata_cache.mtx,
 	    arena_prof_mutex_extent_avail)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_dirty.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_dirty.mtx,
 	    arena_prof_mutex_extents_dirty)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_muzzy.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_muzzy.mtx,
 	    arena_prof_mutex_extents_muzzy)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_retained.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_retained.mtx,
 	    arena_prof_mutex_extents_retained)
 	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
 	    arena_prof_mutex_decay_dirty)
@@ -258,7 +262,8 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	ecache_dalloc(tsdn, arena, ehooks, &arena->ecache_dirty, edata);
+	ecache_dalloc(tsdn, arena, ehooks, &arena->pa_shard.ecache_dirty,
+	    edata);
 	if (arena_dirty_decay_ms_get(arena) == 0) {
 		arena_decay_dirty(tsdn, arena, false, true);
 	} else {
@@ -434,16 +439,18 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	szind_t szind = sz_size2index(usize);
 	size_t mapped_add;
 	size_t esize = usize + sz_large_pad;
-	edata_t *edata = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    NULL, esize, alignment, false, szind, zero);
+	edata_t *edata = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind,
+	    zero);
 	if (edata == NULL && arena_may_have_muzzy(arena)) {
-		edata = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-		    NULL, esize, alignment, false, szind, zero);
+		edata = ecache_alloc(tsdn, arena, ehooks,
+		    &arena->pa_shard.ecache_muzzy, NULL, esize, alignment,
+		    false, szind, zero);
 	}
 	if (edata == NULL) {
 		edata = ecache_alloc_grow(tsdn, arena, ehooks,
-		    &arena->ecache_retained, NULL, esize, alignment, false,
-		    szind, zero);
+		    &arena->pa_shard.ecache_retained, NULL, esize, alignment,
+		    false, szind, zero);
 		if (config_stats) {
 			/*
 			 * edata may be NULL on OOM, but in that case mapped_add
@@ -808,14 +815,14 @@ bool
 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
-	    &arena->ecache_dirty, decay_ms);
+	    &arena->pa_shard.ecache_dirty, decay_ms);
 }
 
 bool
 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
-	    &arena->ecache_muzzy, decay_ms);
+	    &arena->pa_shard.ecache_muzzy, decay_ms);
 }
 
 static size_t
@@ -867,7 +874,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 			    !extent_purge_lazy_wrapper(tsdn, arena, ehooks,
 			    edata, 0, edata_size_get(edata))) {
 				ecache_dalloc(tsdn, arena, ehooks,
-				    &arena->ecache_muzzy, edata);
+				    &arena->pa_shard.ecache_muzzy, edata);
 				arena_background_thread_inactivity_check(tsdn,
 				    arena, is_background_thread);
 				break;
@@ -978,18 +985,18 @@ static bool
 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
-	    &arena->ecache_dirty, is_background_thread, all);
+	    &arena->pa_shard.ecache_dirty, is_background_thread, all);
 }
 
 static bool
 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
-	if (ecache_npages_get(&arena->ecache_muzzy) == 0 &&
+	if (ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0 &&
 	    arena_muzzy_decay_ms_get(arena) <= 0) {
 		return false;
 	}
 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
-	    &arena->ecache_muzzy, is_background_thread, all);
+	    &arena->pa_shard.ecache_muzzy, is_background_thread, all);
 }
 
 void
@@ -1159,7 +1166,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	edata_t *edata;
 	while ((edata = ecache_evict(tsdn, arena, ehooks,
-	    &arena->ecache_retained, 0)) != NULL) {
+	    &arena->pa_shard.ecache_retained, 0)) != NULL) {
 		extent_destroy_wrapper(tsdn, arena, ehooks, edata);
 	}
 }
@@ -1175,8 +1182,8 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
 	 * extents, so only retained extents may remain.
 	 */
-	assert(ecache_npages_get(&arena->ecache_dirty) == 0);
-	assert(ecache_npages_get(&arena->ecache_muzzy) == 0);
+	assert(ecache_npages_get(&arena->pa_shard.ecache_dirty) == 0);
+	assert(ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0);
 
 	/* Deallocate retained memory. */
 	arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -1210,8 +1217,9 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	    WITNESS_RANK_CORE, 0);
 
 	zero = false;
-	slab = ecache_alloc_grow(tsdn, arena, ehooks, &arena->ecache_retained,
-	    NULL, bin_info->slab_size, PAGE, true, szind, &zero);
+	slab = ecache_alloc_grow(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_retained, NULL, bin_info->slab_size, PAGE,
+	    true, szind, &zero);
 
 	if (config_stats && slab != NULL) {
 		arena_stats_mapped_add(tsdn, &arena->stats,
@@ -1230,11 +1238,13 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	szind_t szind = sz_size2index(bin_info->reg_size);
 	bool zero = false;
-	edata_t *slab = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    NULL, bin_info->slab_size, PAGE, true, binind, &zero);
+	edata_t *slab = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, NULL, bin_info->slab_size, PAGE,
+	    true, binind, &zero);
 	if (slab == NULL && arena_may_have_muzzy(arena)) {
-		slab = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-		    NULL, bin_info->slab_size, PAGE, true, binind, &zero);
+		slab = ecache_alloc(tsdn, arena, ehooks,
+		    &arena->pa_shard.ecache_muzzy, NULL, bin_info->slab_size,
+		    PAGE, true, binind, &zero);
 	}
 	if (slab == NULL) {
 		slab = arena_slab_alloc_hard(tsdn, arena, ehooks, bin_info,
@@ -2023,16 +2033,16 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * are likely to be reused soon after deallocation, and the cost of
 	 * merging/splitting extents is non-trivial.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_dirty, extent_state_dirty, ind,
-	    true)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_dirty, extent_state_dirty,
+	    ind, true)) {
 		goto label_error;
 	}
 	/*
 	 * Coalesce muzzy extents immediately, because operations on them are in
 	 * the critical path much less often than for dirty extents.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_muzzy, extent_state_muzzy, ind,
-	    false)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_muzzy, extent_state_muzzy,
+	    ind, false)) {
 		goto label_error;
 	}
 	/*
@@ -2041,8 +2051,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * coalescing), but also because operations on retained extents are not
 	 * in the critical path.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_retained, extent_state_retained,
-	    ind, false)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_retained,
+	    extent_state_retained, ind, false)) {
 		goto label_error;
 	}
 
@@ -2198,9 +2208,9 @@ arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
-	ecache_prefork(tsdn, &arena->ecache_dirty);
-	ecache_prefork(tsdn, &arena->ecache_muzzy);
-	ecache_prefork(tsdn, &arena->ecache_retained);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_retained);
 }
 
 void
@@ -2240,9 +2250,9 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
 	base_postfork_parent(tsdn, arena->base);
 	edata_cache_postfork_parent(tsdn, &arena->edata_cache);
-	ecache_postfork_parent(tsdn, &arena->ecache_dirty);
-	ecache_postfork_parent(tsdn, &arena->ecache_muzzy);
-	ecache_postfork_parent(tsdn, &arena->ecache_retained);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_retained);
 	ecache_grow_postfork_parent(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
@@ -2286,9 +2296,9 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
 	base_postfork_child(tsdn, arena->base);
 	edata_cache_postfork_child(tsdn, &arena->edata_cache);
-	ecache_postfork_child(tsdn, &arena->ecache_dirty);
-	ecache_postfork_child(tsdn, &arena->ecache_muzzy);
-	ecache_postfork_child(tsdn, &arena->ecache_retained);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_retained);
 	ecache_grow_postfork_child(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
diff --git a/src/background_thread.c b/src/background_thread.c
index ca06be02..ddfe3a35 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -202,12 +202,12 @@ static uint64_t
 arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
 	uint64_t i1, i2;
 	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
-	    &arena->ecache_dirty);
+	    &arena->pa_shard.ecache_dirty);
 	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 		return i1;
 	}
 	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
-	    &arena->ecache_muzzy);
+	    &arena->pa_shard.ecache_muzzy);
 
 	return i1 < i2 ? i1 : i2;
 }
@@ -717,8 +717,8 @@ background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
 		if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
 			should_signal = true;
 		} else if (unlikely(background_thread_indefinite_sleep(info)) &&
-		    (ecache_npages_get(&arena->ecache_dirty) > 0 ||
-		    ecache_npages_get(&arena->ecache_muzzy) > 0 ||
+		    (ecache_npages_get(&arena->pa_shard.ecache_dirty) > 0 ||
+		    ecache_npages_get(&arena->pa_shard.ecache_muzzy) > 0 ||
 		    info->npages_to_purge_new > 0)) {
 			should_signal = true;
 		} else {
diff --git a/src/ctl.c b/src/ctl.c
index 86ac83e1..1c180696 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -3073,9 +3073,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
 		}
 		MUTEX_PROF_RESET(arena->large_mtx);
 		MUTEX_PROF_RESET(arena->edata_cache.mtx);
-		MUTEX_PROF_RESET(arena->ecache_dirty.mtx);
-		MUTEX_PROF_RESET(arena->ecache_muzzy.mtx);
-		MUTEX_PROF_RESET(arena->ecache_retained.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_dirty.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_muzzy.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_retained.mtx);
 		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
 		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
 		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
diff --git a/src/extent.c b/src/extent.c
index 54ac40b2..d684388d 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -686,11 +686,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	if (result == extent_split_interior_ok) {
 		if (lead != NULL) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, lead, true);
+			    &arena->pa_shard.ecache_retained, lead, true);
 		}
 		if (trail != NULL) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, trail, true);
+			    &arena->pa_shard.ecache_retained, trail, true);
 		}
 	} else {
 		/*
@@ -703,12 +703,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 				extent_gdump_add(tsdn, to_salvage);
 			}
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, to_salvage, true);
+			    &arena->pa_shard.ecache_retained, to_salvage, true);
 		}
 		if (to_leak != NULL) {
 			extent_deregister_no_gdump_sub(tsdn, to_leak);
 			extents_abandon_vm(tsdn, arena, ehooks,
-			    &arena->ecache_retained, to_leak, true);
+			    &arena->pa_shard.ecache_retained, to_leak, true);
 		}
 		goto label_err;
 	}
@@ -717,7 +717,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		if (extent_commit_impl(tsdn, ehooks, edata, 0,
 		    edata_size_get(edata), true)) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, edata, true);
+			    &arena->pa_shard.ecache_retained, edata, true);
 			goto label_err;
 		}
 		/* A successful commit should return zeroed memory. */
@@ -774,8 +774,8 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	malloc_mutex_lock(tsdn, &arena->ecache_grow.mtx);
 
 	edata_t *edata = extent_recycle(tsdn, arena, ehooks,
-	    &arena->ecache_retained, new_addr, size, alignment, slab, szind,
-	    zero, commit, true);
+	    &arena->pa_shard.ecache_retained, new_addr, size, alignment, slab,
+	    szind, zero, commit, true);
 	if (edata != NULL) {
 		malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
 		if (config_prof) {
@@ -974,7 +974,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 		edata = extent_try_coalesce(tsdn, &arena->edata_cache, ehooks,
 		    ecache, edata, NULL, growing_retained);
 	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
-		assert(ecache == &arena->ecache_dirty);
+		assert(ecache == &arena->pa_shard.ecache_dirty);
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
 		do {
@@ -1076,8 +1076,8 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		extent_gdump_sub(tsdn, edata);
 	}
 
-	extent_record(tsdn, arena, ehooks, &arena->ecache_retained, edata,
-	    false);
+	extent_record(tsdn, arena, ehooks, &arena->pa_shard.ecache_retained,
+	    edata, false);
 }
 
 void
diff --git a/src/large.c b/src/large.c
index 1899a463..24ff3be7 100644
--- a/src/large.c
+++ b/src/large.c
@@ -119,19 +119,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	bool is_zeroed_trail = zero;
 	edata_t *trail;
 	bool new_mapping;
-	if ((trail = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    edata_past_get(edata), trailsize, CACHELINE, false, SC_NSIZES,
-	    &is_zeroed_trail)) != NULL
-	    || (trail = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-	    edata_past_get(edata), trailsize, CACHELINE, false, SC_NSIZES,
-	    &is_zeroed_trail)) != NULL) {
+	if ((trail = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, edata_past_get(edata), trailsize,
+	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL
+	    || (trail = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_muzzy, edata_past_get(edata), trailsize,
+	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL) {
 		if (config_stats) {
 			new_mapping = false;
 		}
 	} else {
 		if ((trail = ecache_alloc_grow(tsdn, arena, ehooks,
-		    &arena->ecache_retained, edata_past_get(edata), trailsize,
-		    CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
+		    &arena->pa_shard.ecache_retained, edata_past_get(edata),
+		    trailsize, CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
 		    == NULL) {
 			return true;
 		}