PA: Move in arena large allocation functionality.

David Goldblatt 2020-03-10 12:29:12 -07:00 (committed by David Goldblatt)
parent 7624043a41
commit 9f93625c14
3 changed files with 45 additions and 28 deletions

include/jemalloc/internal/pa.h

@@ -119,4 +119,7 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+    bool slab, szind_t szind, bool *zero, size_t *mapped_add);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */
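
For orientation, a minimal sketch of how a caller might drive the new entry point. This is illustrative only: the tsdn and shard are assumed to already exist, and the request size and CACHELINE alignment are arbitrary choices, not part of this commit.

	/* Hypothetical caller; assumes an initialized pa_shard_t *shard. */
	size_t usize = 2 << 20;		/* illustrative large request */
	szind_t szind = sz_size2index(usize);
	bool zero = false;		/* in/out: whether memory must be/was zeroed */
	size_t mapped_add;		/* out: bytes of newly mapped memory, if any */
	edata_t *edata = pa_alloc(tsdn, shard, usize, CACHELINE,
	    /* slab */ false, szind, &zero, &mapped_add);
	if (edata == NULL) {
		/* OOM; mapped_add is not meaningful on this path. */
	}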

src/arena.c

@@ -451,37 +451,12 @@ arena_may_have_muzzy(arena_t *arena) {
 edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero) {
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
 	szind_t szind = sz_size2index(usize);
 	size_t mapped_add;
 	size_t esize = usize + sz_large_pad;
-	edata_t *edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind,
-	    zero);
-	if (edata == NULL && arena_may_have_muzzy(arena)) {
-		edata = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_muzzy, NULL, esize, alignment,
-		    false, szind, zero);
-	}
-	if (edata == NULL) {
-		edata = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_retained, NULL, esize, alignment,
-		    false, szind, zero);
-		if (config_stats) {
-			/*
-			 * edata may be NULL on OOM, but in that case mapped_add
-			 * isn't used below, so there's no need to conditionally
-			 * set it to 0 here.
-			 */
-			mapped_add = esize;
-		}
-	} else if (config_stats) {
-		mapped_add = 0;
-	}
+	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+	    /* slab */ false, szind, zero, &mapped_add);
 	if (edata != NULL) {
 		if (config_stats) {
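
Merging the hunk above, arena_extent_alloc_large() after this change reduces to the sketch below (reconstructed from the diff alone; the config_stats body is truncated in this view and stays elided):

	edata_t *
	arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
	    size_t alignment, bool *zero) {
		szind_t szind = sz_size2index(usize);
		size_t mapped_add;
		size_t esize = usize + sz_large_pad;
		edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
		    /* slab */ false, szind, zero, &mapped_add);
		if (edata != NULL) {
			if (config_stats) {
				/* ... stats update, truncated in this view ... */
			}
		}
		/* ... */
	}

The ehooks lookup, witness assertion, and ecache fallback cascade all move behind pa_alloc(), so the arena layer no longer touches the ecaches directly for large allocations.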

src/pa.c

@@ -57,3 +57,42 @@ size_t
 pa_shard_extent_sn_next(pa_shard_t *shard) {
 	return atomic_fetch_add_zu(&shard->extent_sn_next, 1, ATOMIC_RELAXED);
 }
+
+static bool
+pa_shard_may_have_muzzy(pa_shard_t *shard) {
+	return pa_shard_muzzy_decay_ms_get(shard) != 0;
+}
+
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+    bool slab, szind_t szind, bool *zero, size_t *mapped_add) {
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+	edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
+	    &shard->ecache_dirty, NULL, size, alignment, slab, szind,
+	    zero);
+	if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
+		edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
+		    NULL, size, alignment, slab, szind, zero);
+	}
+	if (edata == NULL) {
+		edata = ecache_alloc_grow(tsdn, shard, ehooks,
+		    &shard->ecache_retained, NULL, size, alignment, slab,
+		    szind, zero);
+		if (config_stats) {
+			/*
+			 * edata may be NULL on OOM, but in that case mapped_add
+			 * isn't used below, so there's no need to conditionally
+			 * set it to 0 here.
+			 */
+			*mapped_add = size;
+		}
+	} else if (config_stats) {
+		*mapped_add = 0;
+	}
+	return edata;
+}
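
Two notes on the moved code. pa_shard_may_have_muzzy() skips the muzzy-cache probe when muzzy decay is configured to 0, since in that mode purged pages never linger in the muzzy state and the lookup would always miss. And mapped_add carries the contract spelled out in the comment above: it is only meaningful when an extent is actually returned. A hedged sketch of how a stats-enabled caller might consume it (shard_mapped_bytes is a hypothetical counter, not part of this commit):

	size_t mapped_add;
	edata_t *edata = pa_alloc(tsdn, shard, size, alignment, slab, szind,
	    &zero, &mapped_add);
	if (edata != NULL && config_stats) {
		/*
		 * mapped_add is 0 when the request was served from the dirty
		 * or muzzy ecache, and size when ecache_alloc_grow mapped new
		 * pages through the retained path.
		 */
		shard_mapped_bytes += mapped_add;
	}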