PA: Make mapped stat atomic.

We always have atomic_zu_t, and mapped/unmapped transitions are always expensive
enough that trying to piggyback on a lock is a waste of time.
Authored by David Goldblatt on 2020-03-11 18:37:15 -07:00; committed by David Goldblatt
parent 3c28aa6f17
commit 436789ad96
6 changed files with 22 additions and 30 deletions
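
The change boils down to replacing a mutex-protected size_t with a plain atomic one at every site that touches the mapped stat. As an illustration only (this is not jemalloc code; it is a hypothetical standalone C11 sketch using stdatomic.h and pthreads instead of jemalloc's locked_zu_t/atomic_zu_t and LOCKEDINT_MTX wrappers), the two counter styles being traded look roughly like this:

/* counter_sketch.c -- illustrative only; build with: cc -pthread counter_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* "Before": the stat piggybacks on a stats mutex. */
typedef struct {
	pthread_mutex_t mtx;
	size_t mapped;
} locked_stats_t;

static void
locked_stats_mapped_add(locked_stats_t *stats, size_t size) {
	pthread_mutex_lock(&stats->mtx);
	stats->mapped += size;
	pthread_mutex_unlock(&stats->mtx);
}

/* "After": the stat is a lone relaxed atomic; no lock is taken. */
typedef struct {
	atomic_size_t mapped;
} atomic_stats_t;

static void
atomic_stats_mapped_add(atomic_stats_t *stats, size_t size) {
	atomic_fetch_add_explicit(&stats->mapped, size, memory_order_relaxed);
}

static size_t
atomic_stats_mapped_read(atomic_stats_t *stats) {
	/* Stats readers tolerate a slightly stale value; a relaxed load suffices. */
	return atomic_load_explicit(&stats->mapped, memory_order_relaxed);
}

int
main(void) {
	locked_stats_t ls = { .mtx = PTHREAD_MUTEX_INITIALIZER, .mapped = 0 };
	atomic_stats_t as;
	atomic_init(&as.mapped, 0);

	locked_stats_mapped_add(&ls, 1 << 12);
	atomic_stats_mapped_add(&as, 1 << 12);
	atomic_stats_mapped_add(&as, 2 << 12);

	printf("locked mapped: %zu\n", ls.mapped);
	printf("atomic mapped: %zu\n", atomic_stats_mapped_read(&as));
	return 0;
}

The locked form is roughly what the removed pa_shard_stats_mapped_add helper amounted to; the atomic form is what the call sites below now do inline via atomic_fetch_add_zu/atomic_fetch_sub_zu with ATOMIC_RELAXED. The counter only needs to stay exact, not to order other memory accesses, so relaxed operations suffice; and as the commit message notes, mapped/unmapped transitions are expensive enough that sharing a lock for this stat bought nothing.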

@@ -67,7 +67,7 @@ struct pa_shard_stats_s {
 	 * Partially derived -- we maintain our own counter, but add in the
 	 * base's own counter at merge.
 	 */
-	locked_zu_t mapped;
+	atomic_zu_t mapped;
 	/* Number of edata_t structs allocated by base, but not being used. */
 	size_t edata_avail; /* Derived. */
@@ -135,14 +135,6 @@ struct pa_shard_s {
 	base_t *base;
 };
-static inline void
-pa_shard_stats_mapped_add(tsdn_t *tsdn, pa_shard_t *shard, size_t size) {
-	LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
-	locked_inc_zu(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-	    &shard->stats->mapped, size);
-	LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-}
 static inline ssize_t
 pa_shard_dirty_decay_ms_get(pa_shard_t *shard) {
 	return decay_ms_read(&shard->decay_dirty);

@@ -90,16 +90,15 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
 	    &base_mapped, &metadata_thp);
+	size_t mapped = atomic_load_zu(&arena->pa_shard.stats->mapped,
+	    ATOMIC_RELAXED);
+	atomic_load_add_store_zu(&astats->pa_shard_stats.mapped,
+	    base_mapped + mapped);
 	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
-	locked_inc_zu_unsynchronized(&astats->pa_shard_stats.mapped,
-	    base_mapped + locked_read_zu(tsdn,
-	    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
-	    &arena->pa_shard.stats->mapped));
 	locked_inc_zu_unsynchronized(&astats->retained,
 	    ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
 	astats->pa_shard_stats.edata_avail = atomic_load_zu(
 	    &arena->pa_shard.edata_cache.count, ATOMIC_RELAXED);
@@ -436,9 +435,9 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_malloc_stats_update(tsdn, arena, usize);
 		if (mapped_add != 0) {
-			locked_inc_zu(tsdn,
-			    LOCKEDINT_MTX(arena->stats.mtx),
-			    &arena->pa_shard.stats->mapped, mapped_add);
+			atomic_fetch_add_zu(
+			    &arena->pa_shard.stats->mapped, mapped_add,
+			    ATOMIC_RELAXED);
 		}
 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
@@ -848,8 +847,8 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
 	    PAGE, /* slab */ true, /* szind */ binind, &zero, &mapped_add);
 	if (config_stats && slab != NULL && mapped_add != 0) {
-		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard,
-		    bin_info->slab_size);
+		atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
+		    ATOMIC_RELAXED);
 	}
 	if (slab == NULL) {

@@ -861,7 +861,7 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 	ctl_arena_stats_t *astats = ctl_arena->astats;
 	if (!destroyed) {
-		ctl_accum_locked_zu(
+		ctl_accum_atomic_zu(
 		    &sdstats->astats.pa_shard_stats.mapped,
 		    &astats->astats.pa_shard_stats.mapped);
 		ctl_accum_locked_zu(&sdstats->astats.retained,
@@ -1101,8 +1101,9 @@ ctl_refresh(tsdn_t *tsdn) {
 		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
 		ctl_stats->resident = atomic_load_zu(
 		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
-		ctl_stats->mapped = locked_read_atomic_zu(
-		    &ctl_sarena->astats->astats.pa_shard_stats.mapped);
+		ctl_stats->mapped = atomic_load_zu(
+		    &ctl_sarena->astats->astats.pa_shard_stats.mapped,
+		    ATOMIC_RELAXED);
 		ctl_stats->retained = locked_read_atomic_zu(
 		    &ctl_sarena->astats->astats.retained);
@@ -2913,8 +2914,8 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
 CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    locked_read_atomic_zu(&arenas_i(
-    mib[2])->astats->astats.pa_shard_stats.mapped), size_t)
+    atomic_load_zu(&arenas_i(mib[2])->astats->astats.pa_shard_stats.mapped,
+    ATOMIC_RELAXED), size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.retained),
     size_t)

@@ -966,10 +966,9 @@ extent_maximally_purge(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		    LOCKEDINT_MTX(*shard->stats_mtx),
 		    &shard->stats->decay_dirty.purged,
 		    extent_size >> LG_PAGE);
-		locked_dec_zu(tsdn,
-		    LOCKEDINT_MTX(*shard->stats_mtx),
-		    &shard->stats->mapped, extent_size);
 		LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
+		atomic_fetch_sub_zu(&shard->stats->mapped, extent_size,
+		    ATOMIC_RELAXED);
 	}
 }

@@ -122,7 +122,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	}
 	if (config_stats && mapped_add > 0) {
-		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, mapped_add);
+		atomic_fetch_add_zu(&arena->pa_shard.stats->mapped, mapped_add,
+		    ATOMIC_RELAXED);
 	}
 	if (zero) {

@@ -268,9 +268,9 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 		    &decay_stats->nmadvise, nmadvise);
 		locked_inc_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
 		    &decay_stats->purged, npurged);
-		locked_dec_zu(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-		    &shard->stats->mapped, nunmapped << LG_PAGE);
 		LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
+		atomic_fetch_sub_zu(&shard->stats->mapped, nunmapped << LG_PAGE,
+		    ATOMIC_RELAXED);
 	}
 	return npurged;