PA: Move in all modifications of mapped.

David Goldblatt, 2020-03-11 18:49:15 -07:00 (committed by David Goldblatt)
parent 436789ad96
commit e2cf3fb1a3
4 changed files with 22 additions and 38 deletions
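
In effect, the size_t *mapped_add out-parameter is dropped from pa_alloc() and pa_expand(), and the shard's mapped statistic is now updated inside pa itself, next to the ecache_alloc_grow() call that actually maps new pages. A minimal sketch of a call site after this commit, modeled on arena_extent_alloc_large() below (illustrative only; stats locking and surrounding logic elided):

	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
	    /* slab */ false, szind, zero);
	if (edata == NULL) {
		return NULL;
	}
	/* shard->stats->mapped has already been bumped (under config_stats). */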

include/jemalloc/internal/pa.h

@@ -162,10 +162,10 @@ size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 /* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+    size_t alignment, bool slab, szind_t szind, bool *zero);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add);
+    size_t new_size, szind_t szind, bool slab, bool *zero);
 /*
  * The same. Sets *generated_dirty to true if we produced new dirty pages, and
  * false otherwise.

src/arena.c

@@ -424,21 +424,15 @@ edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero) {
 	szind_t szind = sz_size2index(usize);
-	size_t mapped_add;
 	size_t esize = usize + sz_large_pad;
 	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
-	    /* slab */ false, szind, zero, &mapped_add);
+	    /* slab */ false, szind, zero);
 	if (edata != NULL) {
 		if (config_stats) {
 			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 			arena_large_malloc_stats_update(tsdn, arena, usize);
-			if (mapped_add != 0) {
-				atomic_fetch_add_zu(
-				    &arena->pa_shard.stats->mapped, mapped_add,
-				    ATOMIC_RELAXED);
-			}
 			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 		}
 	}
@@ -842,14 +836,9 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	    WITNESS_RANK_CORE, 0);
 	bool zero = false;
-	size_t mapped_add = 0;
 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
-	    PAGE, /* slab */ true, /* szind */ binind, &zero, &mapped_add);
-	if (config_stats && slab != NULL && mapped_add != 0) {
-		atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
-		    ATOMIC_RELAXED);
-	}
+	    PAGE, /* slab */ true, /* szind */ binind, &zero);
 	if (slab == NULL) {
 		return NULL;

src/large.c

@@ -113,19 +113,13 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	 * below, even if is_zeroed_trail ends up true when zero is false.
 	 */
 	bool is_zeroed_trail = zero;
-	size_t mapped_add;
 	szind_t szind = sz_size2index(usize);
 	bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
-	    szind, /* slab */ false, &is_zeroed_trail, &mapped_add);
+	    szind, /* slab */ false, &is_zeroed_trail);
 	if (err) {
 		return true;
 	}
-	if (config_stats && mapped_add > 0) {
-		atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
-		    ATOMIC_RELAXED);
-	}
 	if (zero) {
 		if (config_cache_oblivious) {
 			/*

src/pa.c

@@ -77,16 +77,17 @@ pa_shard_may_have_muzzy(pa_shard_t *shard) {
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool *zero, size_t *mapped_add) {
+    bool slab, szind_t szind, bool *zero) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
-	*mapped_add = 0;
+	size_t mapped_add = 0;
 	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
 	edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
 	    &shard->ecache_dirty, NULL, size, alignment, slab, szind,
 	    zero);
 	if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
 		edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
 		    NULL, size, alignment, slab, szind, zero);
@@ -95,24 +96,21 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 		edata = ecache_alloc_grow(tsdn, shard, ehooks,
 		    &shard->ecache_retained, NULL, size, alignment, slab,
 		    szind, zero);
-		if (config_stats && edata != NULL) {
-			/*
-			 * edata may be NULL on OOM, but in that case mapped_add
-			 * isn't used below, so there's no need to conditionlly
-			 * set it to 0 here.
-			 */
-			*mapped_add = size;
-		}
+		mapped_add = size;
 	}
 	if (edata != NULL) {
 		pa_nactive_add(shard, size >> LG_PAGE);
+		if (config_stats && mapped_add > 0) {
+			atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
+			    ATOMIC_RELAXED);
+		}
 	}
 	return edata;
 }
 
 bool
 pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
+    size_t new_size, szind_t szind, bool slab, bool *zero) {
 	assert(new_size > old_size);
 	assert(edata_size_get(edata) == old_size);
 	assert((new_size & PAGE_MASK) == 0);
@@ -121,7 +119,8 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	void *trail_begin = edata_past_get(edata);
 	size_t expand_amount = new_size - old_size;
-	*mapped_add = 0;
+	size_t mapped_add = 0;
 	if (ehooks_merge_will_fail(ehooks)) {
 		return true;
 	}
@@ -137,18 +136,20 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 		trail = ecache_alloc_grow(tsdn, shard, ehooks,
 		    &shard->ecache_retained, trail_begin, expand_amount, PAGE,
 		    /* slab */ false, SC_NSIZES, zero);
-		*mapped_add = expand_amount;
+		mapped_add = expand_amount;
 	}
 	if (trail == NULL) {
-		*mapped_add = 0;
 		return true;
 	}
 	if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
 	    trail)) {
 		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
-		*mapped_add = 0;
 		return true;
 	}
+	if (config_stats && mapped_add > 0) {
+		atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
+		    ATOMIC_RELAXED);
+	}
 	pa_nactive_add(shard, expand_amount >> LG_PAGE);
 	emap_remap(tsdn, &emap_global, edata, szind, slab);
 	return false;
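
With this change, the increments of the shard's mapped statistic performed on allocation and expansion live in pa_alloc() and pa_expand(), guarded by config_stats and applied with a relaxed atomic add; readers of the counter are unaffected. A hypothetical accessor, shown only to illustrate that invariant (pa_shard_mapped_get() is not part of this commit):

	/*
	 * Hypothetical accessor (not in this commit): reading the counter is
	 * unchanged; only its writers moved into pa.
	 */
	static inline size_t
	pa_shard_mapped_get(pa_shard_t *shard) {
		return atomic_load_zu(&shard->stats->mapped, ATOMIC_RELAXED);
	}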