Arena stats: Give it its own "mapped".

This distinguishes it from the PA mapped stat, which is now named "pa_mapped" to
avoid confusion. The (derived) arena stat includes base memory, and the PA stat
is no longer partially derived.
David Goldblatt authored on 2020-03-12 09:36:25 -07:00; committed by David Goldblatt
parent 506d907e40
commit 81c6027592
6 changed files with 17 additions and 21 deletions
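Before this change, the PA shard's mapped counter was "partially derived": the shard maintained a live atomic counter, and base's mapped bytes were folded into that same field when stats were merged. After the rename, the live counter (pa_mapped) stays purely PA-owned, and the arena-level "mapped" is assembled at merge time from the two sources. A minimal sketch of that relationship, using the internal helpers visible in the diffs below (illustrative only, not a line from the patch):

	/*
	 * Arena "mapped" (derived): base-allocated mapped bytes plus the PA
	 * shard's live counter, which excludes retained and base memory.
	 */
	static size_t
	arena_mapped_sketch(size_t base_mapped, atomic_zu_t *pa_mapped) {
		return base_mapped + atomic_load_zu(pa_mapped, ATOMIC_RELAXED);
	}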

@@ -53,6 +53,7 @@ struct arena_stats_s {
 	size_t base; /* Derived. */
 	size_t resident; /* Derived. */
 	size_t metadata_thp; /* Derived. */
+	size_t mapped; /* Derived. */
 	atomic_zu_t internal;

@@ -70,12 +70,13 @@ struct pa_shard_stats_s {
 	size_t retained; /* Derived. */
 	/*
-	 * Number of bytes currently mapped, excluding retained memory.
+	 * Number of bytes currently mapped, excluding retained memory (and any
+	 * base-allocated memory, which is tracked by the arena stats).
 	 *
-	 * Partially derived -- we maintain our own counter, but add in the
-	 * base's own counter at merge.
+	 * We name this "pa_mapped" to avoid confusion with the arena_stats
+	 * "mapped".
 	 */
-	atomic_zu_t mapped;
+	atomic_zu_t pa_mapped;
 	/* Number of edata_t structs allocated by base, but not being used. */
 	size_t edata_avail; /* Derived. */

@@ -88,10 +88,9 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
 	    &base_mapped, &metadata_thp);
-	size_t mapped = atomic_load_zu(&arena->pa_shard.stats->mapped,
+	size_t pa_mapped = atomic_load_zu(&arena->pa_shard.stats->pa_mapped,
 	    ATOMIC_RELAXED);
-	atomic_load_add_store_zu(&astats->pa_shard_stats.mapped,
-	    base_mapped + mapped);
+	astats->mapped += base_mapped + pa_mapped;
 	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);

@@ -855,9 +855,7 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 	ctl_arena_stats_t *astats = ctl_arena->astats;
 	if (!destroyed) {
-		ctl_accum_atomic_zu(
-		    &sdstats->astats.pa_shard_stats.mapped,
-		    &astats->astats.pa_shard_stats.mapped);
+		sdstats->astats.mapped += astats->astats.mapped;
 		sdstats->astats.pa_shard_stats.retained
 		    += astats->astats.pa_shard_stats.retained;
 		sdstats->astats.pa_shard_stats.edata_avail
@@ -1085,9 +1083,7 @@ ctl_refresh(tsdn_t *tsdn) {
 		ctl_stats->resident = ctl_sarena->astats->astats.resident;
 		ctl_stats->metadata_thp =
 		    ctl_sarena->astats->astats.metadata_thp;
-		ctl_stats->mapped = atomic_load_zu(
-		    &ctl_sarena->astats->astats.pa_shard_stats.mapped,
-		    ATOMIC_RELAXED);
+		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
 		ctl_stats->retained =
 		    ctl_sarena->astats->astats.pa_shard_stats.retained;
@@ -2898,8 +2894,7 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
 CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    atomic_load_zu(&arenas_i(mib[2])->astats->astats.pa_shard_stats.mapped,
-    ATOMIC_RELAXED), size_t)
+    arenas_i(mib[2])->astats->astats.mapped, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     arenas_i(mib[2])->astats->astats.pa_shard_stats.retained,
     size_t)
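
The ctl changes only affect how the exported numbers are assembled; the public mallctl names stay the same. As a usage reference, a small sketch of reading the totals these statistics feed, via the documented mallctl interface (assumes a jemalloc build with statistics enabled; error handling trimmed):

	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void) {
		/* Advance the epoch so the stats snapshot is refreshed. */
		uint64_t epoch = 1;
		size_t sz = sizeof(epoch);
		mallctl("epoch", &epoch, &sz, &epoch, sz);

		/* Global mapped bytes (ctl_stats->mapped above). */
		size_t mapped;
		sz = sizeof(mapped);
		if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0) {
			printf("stats.mapped: %zu\n", mapped);
		}

		/* Per-arena mapped bytes (stats_arenas_i_mapped above). */
		size_t arena_mapped;
		sz = sizeof(arena_mapped);
		if (mallctl("stats.arenas.0.mapped", &arena_mapped, &sz,
		    NULL, 0) == 0) {
			printf("stats.arenas.0.mapped: %zu\n", arena_mapped);
		}
		return 0;
	}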

@@ -967,7 +967,7 @@ extent_maximally_purge(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		    &shard->stats->decay_dirty.purged,
 		    extent_size >> LG_PAGE);
 		LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-		atomic_fetch_sub_zu(&shard->stats->mapped, extent_size,
+		atomic_fetch_sub_zu(&shard->stats->pa_mapped, extent_size,
 		    ATOMIC_RELAXED);
 	}
 }

@@ -101,8 +101,8 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	if (edata != NULL) {
 		pa_nactive_add(shard, size >> LG_PAGE);
 		if (config_stats && mapped_add > 0) {
-			atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
-			    ATOMIC_RELAXED);
+			atomic_fetch_add_zu(&shard->stats->pa_mapped,
+			    mapped_add, ATOMIC_RELAXED);
 		}
 	}
 	return edata;
@@ -147,7 +147,7 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 		return true;
 	}
 	if (config_stats && mapped_add > 0) {
-		atomic_fetch_add_zu(&shard->stats->mapped, mapped_add,
+		atomic_fetch_add_zu(&shard->stats->pa_mapped, mapped_add,
 		    ATOMIC_RELAXED);
 	}
 	pa_nactive_add(shard, expand_amount >> LG_PAGE);
@@ -270,8 +270,8 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 		locked_inc_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
 		    &decay_stats->purged, npurged);
 		LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-		atomic_fetch_sub_zu(&shard->stats->mapped, nunmapped << LG_PAGE,
-		    ATOMIC_RELAXED);
+		atomic_fetch_sub_zu(&shard->stats->pa_mapped,
+		    nunmapped << LG_PAGE, ATOMIC_RELAXED);
 	}
 	return npurged;
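
All of the pa_mapped updates above use jemalloc's relaxed atomic size_t helpers: the counter is only read for statistics, so no ordering against other memory operations is needed on the allocation and purge paths. A standalone C11 sketch of the same counter pattern (an illustration, not the jemalloc wrappers themselves):

	#include <stdatomic.h>
	#include <stddef.h>

	/* Statistics-only counter of currently mapped bytes. */
	static _Atomic size_t pa_mapped_bytes;

	/* Add mapped bytes (cf. pa_alloc/pa_expand above). */
	static void
	mapped_add(size_t nbytes) {
		atomic_fetch_add_explicit(&pa_mapped_bytes, nbytes,
		    memory_order_relaxed);
	}

	/* Subtract bytes unmapped by purging (cf. pa_decay_stashed above). */
	static void
	mapped_sub(size_t nbytes) {
		atomic_fetch_sub_explicit(&pa_mapped_bytes, nbytes,
		    memory_order_relaxed);
	}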