PA: Move in full stats merging.

David Goldblatt 2020-03-12 10:28:18 -07:00 committed by David Goldblatt
parent 81c6027592
commit 238f3c7430
3 changed files with 79 additions and 67 deletions

@@ -233,4 +233,13 @@ void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
    size_t *ndirty, size_t *nmuzzy);

static inline size_t
pa_shard_pa_mapped(pa_shard_t *shard) {
	return atomic_load_zu(&shard->stats->pa_mapped, ATOMIC_RELAXED);
}

void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
    pa_shard_stats_t *shard_stats_out, pa_extent_stats_t *extent_stats_out,
    size_t *resident);

#endif /* JEMALLOC_INTERNAL_PA_H */
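Taken together, the new declarations let a caller read a shard's mapped-byte counter and merge its full stats through the pa layer instead of reaching into pa_shard internals. Roughly the caller-side shape, mirroring the arena_stats_merge() hunk further down (the surrounding variables are the ones visible in that hunk):

	size_t pa_mapped = pa_shard_pa_mapped(&arena->pa_shard);
	astats->mapped += base_mapped + pa_mapped;

	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
	/* ... per-size-class stats, as before ... */
	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
	    estats, &astats->resident);
	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);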

@@ -88,60 +88,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);
	size_t pa_mapped = atomic_load_zu(&arena->pa_shard.stats->pa_mapped,
	    ATOMIC_RELAXED);
	size_t pa_mapped = pa_shard_pa_mapped(&arena->pa_shard);
	astats->mapped += base_mapped + pa_mapped;
	astats->resident += base_resident;

	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);

	astats->pa_shard_stats.retained +=
	    ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE;
	astats->pa_shard_stats.edata_avail += atomic_load_zu(
	    &arena->pa_shard.edata_cache.count, ATOMIC_RELAXED);

	/* Dirty decay stats */
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_dirty.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_dirty.npurge));
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_dirty.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_dirty.nmadvise));
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_dirty.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_dirty.purged));

	/* Muzzy decay stats */
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_muzzy.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_muzzy.npurge));
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_muzzy.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_muzzy.nmadvise));
	locked_inc_u64_unsynchronized(
	    &astats->pa_shard_stats.decay_muzzy.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
	    &arena->pa_shard.stats->decay_muzzy.purged));

	astats->base += base_allocated;
	atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
	astats->metadata_thp += metadata_thp;

	size_t pa_resident_pgs = 0;
	pa_resident_pgs
	    += atomic_load_zu(&arena->pa_shard.nactive, ATOMIC_RELAXED);
	pa_resident_pgs
	    += ecache_npages_get(&arena->pa_shard.ecache_dirty);
	astats->resident += base_resident + (pa_resident_pgs << LG_PAGE);

	atomic_load_add_store_zu(&astats->pa_shard_stats.abandoned_vm,
	    atomic_load_zu(&arena->stats.pa_shard_stats.abandoned_vm,
	    ATOMIC_RELAXED));

	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = locked_read_u64(tsdn,
		    LOCKEDINT_MTX(arena->stats.mtx),
@@ -180,27 +136,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
		    curlextents * sz_index2size(SC_NBINS + i);
	}

	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = ecache_nextents_get(&arena->pa_shard.ecache_dirty, i);
		muzzy = ecache_nextents_get(&arena->pa_shard.ecache_muzzy, i);
		retained = ecache_nextents_get(&arena->pa_shard.ecache_retained,
		    i);
		dirty_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_dirty,
		    i);
		muzzy_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_muzzy,
		    i);
		retained_bytes = ecache_nbytes_get(
		    &arena->pa_shard.ecache_retained, i);

		estats[i].ndirty = dirty;
		estats[i].nmuzzy = muzzy;
		estats[i].nretained = retained;
		estats[i].dirty_bytes = dirty_bytes;
		estats[i].muzzy_bytes = muzzy_bytes;
		estats[i].retained_bytes = retained_bytes;
	}

	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
	    estats, &astats->resident);

	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
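The six decay-counter merges removed above (and their counterparts in the new pa_shard_stats_merge() in the last hunk) all repeat the same read-under-mutex, accumulate-unsynchronized step. A hypothetical helper, not part of this commit (the name decay_stats_merge_one is invented here, and the locked_u64_t counter type is assumed from the lockedint machinery these calls use), could look roughly like:

	/*
	 * Hypothetical, for illustration only: read one source counter while
	 * the shard's stats mutex is held, then accumulate it into the
	 * caller-private output, which needs no synchronization of its own.
	 */
	static void
	decay_stats_merge_one(tsdn_t *tsdn, pa_shard_t *shard,
	    locked_u64_t *dst, locked_u64_t *src) {
		locked_inc_u64_unsynchronized(dst,
		    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
		    src));
	}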

@@ -61,3 +61,69 @@ pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
	*ndirty += ecache_npages_get(&shard->ecache_dirty);
	*nmuzzy += ecache_npages_get(&shard->ecache_muzzy);
}

void
pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
    pa_shard_stats_t *shard_stats_out, pa_extent_stats_t *extent_stats_out,
    size_t *resident) {
	cassert(config_stats);

	shard_stats_out->retained +=
	    ecache_npages_get(&shard->ecache_retained) << LG_PAGE;
	shard_stats_out->edata_avail += atomic_load_zu(
	    &shard->edata_cache.count, ATOMIC_RELAXED);

	size_t resident_pgs = 0;
	resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
	resident_pgs += ecache_npages_get(&shard->ecache_dirty);
	*resident += (resident_pgs << LG_PAGE);

	/* Dirty decay stats */
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_dirty.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_dirty.npurge));
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_dirty.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_dirty.nmadvise));
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_dirty.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_dirty.purged));

	/* Muzzy decay stats */
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_muzzy.npurge,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_muzzy.npurge));
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_muzzy.nmadvise,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_muzzy.nmadvise));
	locked_inc_u64_unsynchronized(
	    &shard_stats_out->decay_muzzy.purged,
	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
	    &shard->stats->decay_muzzy.purged));

	atomic_load_add_store_zu(&shard_stats_out->abandoned_vm,
	    atomic_load_zu(&shard->stats->abandoned_vm, ATOMIC_RELAXED));

	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = ecache_nextents_get(&shard->ecache_dirty, i);
		muzzy = ecache_nextents_get(&shard->ecache_muzzy, i);
		retained = ecache_nextents_get(&shard->ecache_retained, i);
		dirty_bytes = ecache_nbytes_get(&shard->ecache_dirty, i);
		muzzy_bytes = ecache_nbytes_get(&shard->ecache_muzzy, i);
		retained_bytes = ecache_nbytes_get(&shard->ecache_retained, i);

		extent_stats_out[i].ndirty = dirty;
		extent_stats_out[i].nmuzzy = muzzy;
		extent_stats_out[i].nretained = retained;
		extent_stats_out[i].dirty_bytes = dirty_bytes;
		extent_stats_out[i].muzzy_bytes = muzzy_bytes;
		extent_stats_out[i].retained_bytes = retained_bytes;
	}
}
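One small note on the merged function: the scalar shard counters are accumulated into the output (the += updates and the unsynchronized u64 increments), while the per-psize extent slots are plain assignments, and resident is reported in bytes by shifting a page count by LG_PAGE. A tiny standalone example of that conversion (the 4 KiB page size, i.e. a shift of 12, is an assumption made only for the numbers here):

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for LG_PAGE; 12 means 4 KiB pages. */
	#define EXAMPLE_LG_PAGE 12

	int main(void) {
		size_t nactive = 1000;	/* pages in active extents */
		size_t ndirty = 24;	/* pages held in the dirty ecache */
		/* Mirrors resident_pgs << LG_PAGE above: (1000 + 24) * 4096. */
		size_t resident_bytes = (nactive + ndirty) << EXAMPLE_LG_PAGE;
		printf("resident = %zu bytes\n", resident_bytes);	/* 4194304 */
		return 0;
	}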