PA: Move in all modifications of mapped.

Authored by David Goldblatt on 2020-03-11 18:49:15 -07:00; committed by David Goldblatt.
parent 436789ad96
commit e2cf3fb1a3
4 changed files with 22 additions and 38 deletions
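
The net effect, visible across all four files: pa_alloc() and pa_expand() lose their size_t *mapped_add out-parameter, and the shard's mapped statistic is instead updated inside the PA module. A minimal before/after sketch of a caller, assembled from the hunks below (illustrative only, not the exact surrounding code):

    /* Before: the caller received mapped_add and bumped the stat itself. */
    size_t mapped_add;
    edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
        /* slab */ false, szind, zero, &mapped_add);
    if (edata != NULL && config_stats && mapped_add != 0) {
        atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
            ATOMIC_RELAXED);
    }

    /* After: pa_alloc() updates shard->stats->mapped internally. */
    edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
        /* slab */ false, szind, zero);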


@@ -162,10 +162,10 @@ size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 /* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+    size_t alignment, bool slab, szind_t szind, bool *zero);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add);
+    size_t new_size, szind_t szind, bool slab, bool *zero);
 /*
  * The same. Sets *generated_dirty to true if we produced new dirty pages, and
  * false otherwise.


@@ -424,21 +424,15 @@ edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero) {
     szind_t szind = sz_size2index(usize);
-    size_t mapped_add;
     size_t esize = usize + sz_large_pad;
     edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
-        /* slab */ false, szind, zero, &mapped_add);
+        /* slab */ false, szind, zero);
     if (edata != NULL) {
         if (config_stats) {
             LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
             arena_large_malloc_stats_update(tsdn, arena, usize);
-            if (mapped_add != 0) {
-                atomic_fetch_add_zu(
-                    &arena->pa_shard.stats->mapped, mapped_add,
-                    ATOMIC_RELAXED);
-            }
             LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
         }
     }
@@ -842,14 +836,9 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
         WITNESS_RANK_CORE, 0);
     bool zero = false;
-    size_t mapped_add = 0;
     edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
-        PAGE, /* slab */ true, /* szind */ binind, &zero, &mapped_add);
-    if (config_stats && slab != NULL && mapped_add != 0) {
-        atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
-            ATOMIC_RELAXED);
-    }
+        PAGE, /* slab */ true, /* szind */ binind, &zero);
     if (slab == NULL) {
         return NULL;


@@ -113,19 +113,13 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
      * below, even if is_zeroed_trail ends up true when zero is false.
      */
     bool is_zeroed_trail = zero;
-    size_t mapped_add;
     szind_t szind = sz_size2index(usize);
     bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
-        szind, /* slab */ false, &is_zeroed_trail, &mapped_add);
+        szind, /* slab */ false, &is_zeroed_trail);
     if (err) {
         return true;
     }
-    if (config_stats && mapped_add > 0) {
-        atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
-            ATOMIC_RELAXED);
-    }
     if (zero) {
         if (config_cache_oblivious) {
             /*


@@ -77,16 +77,17 @@ pa_shard_may_have_muzzy(pa_shard_t *shard) {
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool *zero, size_t *mapped_add) {
+    bool slab, szind_t szind, bool *zero) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
-    *mapped_add = 0;
+    size_t mapped_add = 0;
     ehooks_t *ehooks = pa_shard_ehooks_get(shard);
     edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
         &shard->ecache_dirty, NULL, size, alignment, slab, szind,
         zero);
     if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
         edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
             NULL, size, alignment, slab, szind, zero);
@@ -95,24 +96,21 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
         edata = ecache_alloc_grow(tsdn, shard, ehooks,
             &shard->ecache_retained, NULL, size, alignment, slab,
             szind, zero);
-        if (config_stats && edata != NULL) {
-            /*
-             * edata may be NULL on OOM, but in that case mapped_add
-             * isn't used below, so there's no need to conditionlly
-             * set it to 0 here.
-             */
-            *mapped_add = size;
-        }
+        mapped_add = size;
     }
     if (edata != NULL) {
         pa_nactive_add(shard, size >> LG_PAGE);
+        if (config_stats && mapped_add > 0) {
+            atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
+                ATOMIC_RELAXED);
+        }
     }
     return edata;
 }
 
 bool
 pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
+    size_t new_size, szind_t szind, bool slab, bool *zero) {
     assert(new_size > old_size);
     assert(edata_size_get(edata) == old_size);
     assert((new_size & PAGE_MASK) == 0);
@@ -121,7 +119,8 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     void *trail_begin = edata_past_get(edata);
     size_t expand_amount = new_size - old_size;
-    *mapped_add = 0;
+    size_t mapped_add = 0;
     if (ehooks_merge_will_fail(ehooks)) {
         return true;
     }
@@ -137,18 +136,20 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
         trail = ecache_alloc_grow(tsdn, shard, ehooks,
             &shard->ecache_retained, trail_begin, expand_amount, PAGE,
             /* slab */ false, SC_NSIZES, zero);
-        *mapped_add = expand_amount;
+        mapped_add = expand_amount;
     }
     if (trail == NULL) {
-        *mapped_add = 0;
         return true;
     }
     if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
         trail)) {
         extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
-        *mapped_add = 0;
         return true;
     }
+    if (config_stats && mapped_add > 0) {
+        atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
+            ATOMIC_RELAXED);
+    }
     pa_nactive_add(shard, expand_amount >> LG_PAGE);
     emap_remap(tsdn, &emap_global, edata, szind, slab);
     return false;
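
Note that with this change pa_expand()'s failure paths no longer have to reset an out-parameter: the mapped stat is added only after both the trail allocation and the merge succeed. Readers of the counter are unaffected; a minimal sketch, assuming stats->mapped remains the atomic_zu_t that the fetch-add above implies:

    /* Hypothetical read of the shard-wide mapped counter. */
    size_t mapped = atomic_load_zu(&shard->stats->mapped, ATOMIC_RELAXED);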