PA: Move mapped into pa stats.

David Goldblatt authored 2020-03-09 12:06:19 -07:00; committed by David Goldblatt
parent 6ca918d0cf
commit 70d12ffa05
7 changed files with 44 additions and 35 deletions
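
In short, the per-arena mapped byte counter moves out of arena_stats_t and into pa_shard_stats_t, and the page allocator shard now carries a pointer to the mutex guarding its locked counters. The sketch below is not part of the commit: it is a hypothetical call site, assuming the jemalloc-internal headers, showing how code that previously used arena_stats_mapped_add is expected to account for newly mapped bytes after this change.

	/* Hypothetical call site, for illustration only. */
	static void
	record_new_mapping(tsdn_t *tsdn, arena_t *arena, size_t size) {
		if (config_stats) {
			/* Before: the counter lived in arena_stats_t. */
			/* arena_stats_mapped_add(tsdn, &arena->stats, size); */

			/*
			 * After: the counter lives in the pa shard's stats; the
			 * helper locks *arena->pa_shard.stats_mtx internally.
			 */
			pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, size);
		}
	}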


@@ -148,15 +148,18 @@ arena_decay_extent(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
 	if (config_stats) {
 		/* Update stats accordingly. */
-		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
-		locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+		LOCKEDINT_MTX_LOCK(tsdn, *arena->pa_shard.stats_mtx);
+		locked_inc_u64(tsdn,
+		    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
 		    &arena->pa_shard.stats->decay_dirty.nmadvise, 1);
-		locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+		locked_inc_u64(tsdn,
+		    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
 		    &arena->pa_shard.stats->decay_dirty.purged,
 		    extent_size >> LG_PAGE);
-		locked_dec_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-		    &arena->stats.mapped, extent_size);
-		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+		locked_dec_zu(tsdn,
+		    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
+		    &arena->pa_shard.stats->mapped, extent_size);
+		LOCKEDINT_MTX_UNLOCK(tsdn, *arena->pa_shard.stats_mtx);
 	}
 }


@@ -61,12 +61,6 @@ struct arena_stats_extents_s {
 typedef struct arena_stats_s arena_stats_t;
 struct arena_stats_s {
 	LOCKEDINT_MTX_DECLARE(mtx)
-	/*
-	 * Number of bytes currently mapped, excluding retained memory.
-	 */
-	locked_zu_t mapped; /* Partially derived. */
 	/*
 	 * Number of unused virtual memory bytes currently retained.  Retained
 	 * bytes are technically mapped (though always decommitted or purged),
@@ -135,12 +129,4 @@ arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
 	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
 }
-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
-	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
-	locked_inc_zu(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
-	    &arena_stats->mapped, size);
-	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
-}
 #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */


@@ -35,6 +35,14 @@ typedef struct pa_shard_stats_s pa_shard_stats_t;
 struct pa_shard_stats_s {
 	pa_shard_decay_stats_t decay_dirty;
 	pa_shard_decay_stats_t decay_muzzy;
+	/*
+	 * Number of bytes currently mapped, excluding retained memory.
+	 *
+	 * Partially derived -- we maintain our own counter, but add in the
+	 * base's own counter at merge.
+	 */
+	locked_zu_t mapped;
 	/* VM space had to be leaked (undocumented).  Normally 0. */
 	atomic_zu_t abandoned_vm;
 };
@@ -60,12 +68,21 @@ struct pa_shard_s {
 	/* Extent serial number generator state. */
 	atomic_zu_t extent_sn_next;
+	malloc_mutex_t *stats_mtx;
 	pa_shard_stats_t *stats;
 };
+static inline void
+pa_shard_stats_mapped_add(tsdn_t *tsdn, pa_shard_t *shard, size_t size) {
+	LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
+	locked_inc_zu(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+	    &shard->stats->mapped, size);
+	LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
+}
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats);
+    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 #endif /* JEMALLOC_INTERNAL_PA_H */
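
As a usage note (illustrative only, with hypothetical names): an owner of a pa_shard_t now passes the mutex that protects its stats alongside the stats struct at init time, and routes mapped-byte accounting through the new inline helper. This mirrors the arena_new and arena_slab_alloc_hard changes later in this commit; the sketch assumes the jemalloc-internal pa.h declarations shown above.

	/* Hypothetical wiring of the new stats_mtx parameter; sketch only. */
	static bool
	owner_shard_setup(tsdn_t *tsdn, pa_shard_t *shard, base_t *base,
	    unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
		/* The shard records which mutex guards its locked counters. */
		if (pa_shard_init(tsdn, shard, base, ind, stats, stats_mtx)) {
			return true;	/* Error. */
		}
		/* Later growth in mapped memory is recorded on the shard. */
		pa_shard_stats_mapped_add(tsdn, shard, PAGE);
		return false;
	}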


@@ -95,9 +95,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
-	locked_inc_zu_unsynchronized(&astats->mapped, base_mapped
-	    + locked_read_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-	    &arena->stats.mapped));
+	locked_inc_zu_unsynchronized(&astats->pa_shard_stats.mapped,
+	    base_mapped + locked_read_zu(tsdn,
+	    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
+	    &arena->pa_shard.stats->mapped));
 	locked_inc_zu_unsynchronized(&astats->retained,
 	    ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
@@ -488,7 +489,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 		if (mapped_add != 0) {
 			locked_inc_zu(tsdn,
 			    LOCKEDINT_MTX(arena->stats.mtx),
-			    &arena->stats.mapped, mapped_add);
+			    &arena->pa_shard.stats->mapped, mapped_add);
 		}
 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
@@ -919,7 +920,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
 		    &decay->stats->purged, npurged);
 		locked_dec_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-		    &arena->stats.mapped, nunmapped << LG_PAGE);
+		    &arena->pa_shard.stats->mapped, nunmapped << LG_PAGE);
 		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
@@ -1240,7 +1241,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	    true, szind, &zero);
 	if (config_stats && slab != NULL) {
-		arena_stats_mapped_add(tsdn, &arena->stats,
+		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard,
 		    bin_info->slab_size);
 	}
@@ -2039,7 +2040,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	}
 	if (pa_shard_init(tsdn, &arena->pa_shard, base, ind,
-	    &arena->stats.pa_shard_stats)) {
+	    &arena->stats.pa_shard_stats, LOCKEDINT_MTX(arena->stats.mtx))) {
 		goto label_error;
 	}
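
For reference (a sketch, not part of the diff): the "partially derived" behavior described in the pa.h comment shows up in arena_stats_merge above, where the exported mapped statistic is the shard's own counter plus the base allocator's mapped bytes. A hypothetical helper expressing the same computation:

	/* Hypothetical summary of the merge-time computation shown above. */
	static size_t
	merged_mapped_bytes(tsdn_t *tsdn, arena_t *arena, size_t base_mapped) {
		return base_mapped + locked_read_zu(tsdn,
		    LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
		    &arena->pa_shard.stats->mapped);
	}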


@@ -861,8 +861,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 	ctl_arena_stats_t *astats = ctl_arena->astats;
 	if (!destroyed) {
-		ctl_accum_locked_zu(&sdstats->astats.mapped,
-		    &astats->astats.mapped);
+		ctl_accum_locked_zu(
+		    &sdstats->astats.pa_shard_stats.mapped,
+		    &astats->astats.pa_shard_stats.mapped);
 		ctl_accum_locked_zu(&sdstats->astats.retained,
 		    &astats->astats.retained);
 		ctl_accum_atomic_zu(&sdstats->astats.edata_avail,
@@ -1103,7 +1104,7 @@ ctl_refresh(tsdn_t *tsdn) {
 		ctl_stats->resident = atomic_load_zu(
 		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
 		ctl_stats->mapped = locked_read_atomic_zu(
-		    &ctl_sarena->astats->astats.mapped);
+		    &ctl_sarena->astats->astats.pa_shard_stats.mapped);
 		ctl_stats->retained = locked_read_atomic_zu(
 		    &ctl_sarena->astats->astats.retained);
@@ -2914,8 +2915,8 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
 CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.mapped),
-    size_t)
+    locked_read_atomic_zu(&arenas_i(
+    mib[2])->astats->astats.pa_shard_stats.mapped), size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.retained),
     size_t)


@@ -151,7 +151,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	emap_remap(tsdn, &emap_global, edata, szind, false);
 	if (config_stats && new_mapping) {
-		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, trailsize);
 	}
 	if (zero) {


@@ -3,7 +3,7 @@
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats) {
+    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
 	/* This will change eventually, but for now it should hold. */
 	assert(base_ind_get(base) == ind);
 	/*
@@ -44,6 +44,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 	atomic_store_zu(&shard->extent_sn_next, 0, ATOMIC_RELAXED);
+	shard->stats_mtx = stats_mtx;
 	shard->stats = stats;
 	memset(shard->stats, 0, sizeof(*shard->stats));