Arena stats: Move retained to PA, use plain ints.

Retained memory is a property of the allocated pages, so the statistic belongs in the page allocator (PA) shard stats. The derived fields no longer require any locking; they are computed on demand.
This commit is contained in:
David Goldblatt 2020-03-11 19:24:05 -07:00 committed by David Goldblatt
parent e2cf3fb1a3
commit d0c43217b5
4 changed files with 44 additions and 44 deletions

View File

@ -45,17 +45,16 @@ struct arena_stats_large_s {
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
LOCKEDINT_MTX_DECLARE(mtx)
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
*/
locked_zu_t retained; /* Derived. */
atomic_zu_t base; /* Derived. */
/*
* resident includes the base stats -- that's why it lives here and not
* in pa_shard_stats_t.
*/
size_t base; /* Derived. */
size_t resident; /* Derived. */
size_t metadata_thp; /* Derived. */
atomic_zu_t internal;
atomic_zu_t resident; /* Derived. */
atomic_zu_t metadata_thp;
atomic_zu_t allocated_large; /* Derived. */
locked_u64_t nmalloc_large; /* Derived. */

View File

@ -61,6 +61,14 @@ typedef struct pa_shard_stats_s pa_shard_stats_t;
struct pa_shard_stats_s {
pa_shard_decay_stats_t decay_dirty;
pa_shard_decay_stats_t decay_muzzy;
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
*/
size_t retained; /* Derived. */
/*
* Number of bytes currently mapped, excluding retained memory.
*

View File

@ -97,8 +97,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
locked_inc_zu_unsynchronized(&astats->retained,
ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
astats->pa_shard_stats.retained +=
ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE;
astats->pa_shard_stats.edata_avail = atomic_load_zu(
&arena->pa_shard.edata_cache.count, ATOMIC_RELAXED);
@ -130,13 +130,17 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->pa_shard.stats->decay_muzzy.purged));
atomic_load_add_store_zu(&astats->base, base_allocated);
astats->base += base_allocated;
atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
atomic_load_add_store_zu(&astats->metadata_thp, metadata_thp);
atomic_load_add_store_zu(&astats->resident, base_resident +
(((atomic_load_zu(&arena->pa_shard.nactive, ATOMIC_RELAXED) +
ecache_npages_get(&arena->pa_shard.ecache_dirty) +
ecache_npages_get(&arena->pa_shard.ecache_muzzy)) << LG_PAGE)));
astats->metadata_thp += metadata_thp;
size_t pa_resident_pgs = 0;
pa_resident_pgs
+= atomic_load_zu(&arena->pa_shard.nactive, ATOMIC_RELAXED);
pa_resident_pgs
+= ecache_npages_get(&arena->pa_shard.ecache_dirty);
astats->resident += base_resident + (pa_resident_pgs << LG_PAGE);
atomic_load_add_store_zu(&astats->pa_shard_stats.abandoned_vm,
atomic_load_zu(&arena->stats.pa_shard_stats.abandoned_vm,
ATOMIC_RELAXED));

View File

@ -681,12 +681,6 @@ ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
locked_read_u64_unsynchronized(src));
}
static void
ctl_accum_locked_zu(locked_zu_t *dst, locked_zu_t *src) {
locked_inc_zu_unsynchronized(dst,
locked_read_atomic_zu(src));
}
static void
ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
@ -864,12 +858,13 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_accum_atomic_zu(
&sdstats->astats.pa_shard_stats.mapped,
&astats->astats.pa_shard_stats.mapped);
ctl_accum_locked_zu(&sdstats->astats.retained,
&astats->astats.retained);
sdstats->astats.pa_shard_stats.retained
+= astats->astats.pa_shard_stats.retained;
sdstats->astats.pa_shard_stats.edata_avail
+= astats->astats.pa_shard_stats.edata_avail;
}
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.decay_dirty.npurge,
&astats->astats.pa_shard_stats.decay_dirty.npurge);
@ -898,14 +893,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
ctl_accum_atomic_zu(&sdstats->astats.base,
&astats->astats.base);
sdstats->astats.base += astats->astats.base;
sdstats->astats.resident += astats->astats.resident;
sdstats->astats.metadata_thp += astats->astats.metadata_thp;
ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
ctl_accum_atomic_zu(&sdstats->astats.resident,
&astats->astats.resident);
ctl_accum_atomic_zu(&sdstats->astats.metadata_thp,
&astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
@ -1093,19 +1085,17 @@ ctl_refresh(tsdn_t *tsdn) {
atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
ATOMIC_RELAXED);
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
ctl_stats->metadata = atomic_load_zu(
&ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
ctl_stats->metadata_thp = atomic_load_zu(
&ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
ctl_stats->resident = atomic_load_zu(
&ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
ctl_stats->resident = ctl_sarena->astats->astats.resident;
ctl_stats->metadata_thp =
ctl_sarena->astats->astats.metadata_thp;
ctl_stats->mapped = atomic_load_zu(
&ctl_sarena->astats->astats.pa_shard_stats.mapped,
ATOMIC_RELAXED);
ctl_stats->retained = locked_read_atomic_zu(
&ctl_sarena->astats->astats.retained);
ctl_stats->retained =
ctl_sarena->astats->astats.pa_shard_stats.retained;
ctl_background_thread_stats_read(tsdn);
@ -2917,7 +2907,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.pa_shard_stats.mapped,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.retained),
arenas_i(mib[2])->astats->astats.pa_shard_stats.retained,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
@ -2949,19 +2939,18 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
ATOMIC_RELAXED), size_t)
arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
atomic_load_zu(