diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index 827c0b5e..e1821e69 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -119,4 +119,7 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+    bool slab, szind_t szind, bool *zero, size_t *mapped_add);
 #endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/src/arena.c b/src/arena.c
index 7934a6bf..1e3ae6e5 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -451,37 +451,12 @@ arena_may_have_muzzy(arena_t *arena) {
 edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero) {
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
 	szind_t szind = sz_size2index(usize);
 	size_t mapped_add;
 	size_t esize = usize + sz_large_pad;
-	edata_t *edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind,
-	    zero);
-	if (edata == NULL && arena_may_have_muzzy(arena)) {
-		edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_muzzy, NULL, esize, alignment,
-		    false, szind, zero);
-	}
-	if (edata == NULL) {
-		edata = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_retained, NULL, esize, alignment,
-		    false, szind, zero);
-		if (config_stats) {
-			/*
-			 * edata may be NULL on OOM, but in that case mapped_add
-			 * isn't used below, so there's no need to conditionlly
-			 * set it to 0 here.
-			 */
-			mapped_add = esize;
-		}
-	} else if (config_stats) {
-		mapped_add = 0;
-	}
+
+	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+	    /* slab */ false, szind, zero, &mapped_add);
 
 	if (edata != NULL) {
 		if (config_stats) {
diff --git a/src/pa.c b/src/pa.c
index a4ec4bd0..072d4852 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -57,3 +57,42 @@ size_t
 pa_shard_extent_sn_next(pa_shard_t *shard) {
 	return atomic_fetch_add_zu(&shard->extent_sn_next, 1, ATOMIC_RELAXED);
 }
+
+static bool
+pa_shard_may_have_muzzy(pa_shard_t *shard) {
+	return pa_shard_muzzy_decay_ms_get(shard) != 0;
+}
+
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+    bool slab, szind_t szind, bool *zero, size_t *mapped_add) {
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+
+	edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
+	    &shard->ecache_dirty, NULL, size, alignment, slab, szind,
+	    zero);
+	if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
+		edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
+		    NULL, size, alignment, slab, szind, zero);
+	}
+
+	if (edata == NULL) {
+		edata = ecache_alloc_grow(tsdn, shard, ehooks,
+		    &shard->ecache_retained, NULL, size, alignment, slab,
+		    szind, zero);
+		if (config_stats) {
+			/*
+			 * edata may be NULL on OOM, but in that case the
+			 * caller doesn't read *mapped_add, so there's no need
+			 * to conditionally set it to 0 here.
+			 */
+			*mapped_add = size;
+		}
+	} else if (config_stats) {
+		*mapped_add = 0;
+	}
+	return edata;
+}
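
For reviewers, a minimal sketch of the new calling convention follows. It is not part of the patch: the wrapper name pa_alloc_sketch is hypothetical, and tsdn/shard are assumed to come from an already-initialized arena, exactly as in arena_extent_alloc_large above. Only pa_alloc, sz_size2index, sz_large_pad, and config_stats from the diff are relied on.

/*
 * Sketch only: exercises pa_alloc() as declared in pa.h above.
 * pa_alloc() tries ecache_dirty first, then ecache_muzzy (when muzzy
 * decay is enabled), and finally grows via ecache_retained.
 */
static edata_t *
pa_alloc_sketch(tsdn_t *tsdn, pa_shard_t *shard, size_t usize,
    size_t alignment) {
	szind_t szind = sz_size2index(usize);
	size_t esize = usize + sz_large_pad;
	bool zero = false;
	size_t mapped_add;
	edata_t *edata = pa_alloc(tsdn, shard, esize, alignment,
	    /* slab */ false, szind, &zero, &mapped_add);
	if (edata == NULL) {
		return NULL;	/* OOM; do not read mapped_add here. */
	}
	/*
	 * mapped_add is the number of newly mapped bytes: esize when the
	 * request fell through to ecache_alloc_grow(), 0 when it was
	 * served from the dirty or muzzy ecache. It is written only
	 * under config_stats.
	 */
	if (config_stats) {
		/* e.g. credit mapped_add to a mapped-bytes counter. */
	}
	return edata;
}

The upshot of the refactor is that the dirty -> muzzy -> retained fallback policy now lives in one place (pa.c); arena.c only translates usize into esize/szind and consumes mapped_add for stats.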