diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index eac4a631..fd641754 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -148,15 +148,18 @@ arena_decay_extent(tsdn_t *tsdn,arena_t *arena, ehooks_t *ehooks,
     extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
     if (config_stats) {
         /* Update stats accordingly. */
-        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
-        locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+        LOCKEDINT_MTX_LOCK(tsdn, *arena->pa_shard.stats_mtx);
+        locked_inc_u64(tsdn,
+            LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
             &arena->pa_shard.stats->decay_dirty.nmadvise, 1);
-        locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+        locked_inc_u64(tsdn,
+            LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
             &arena->pa_shard.stats->decay_dirty.purged,
             extent_size >> LG_PAGE);
-        locked_dec_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-            &arena->stats.mapped, extent_size);
-        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+        locked_dec_zu(tsdn,
+            LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
+            &arena->pa_shard.stats->mapped, extent_size);
+        LOCKEDINT_MTX_UNLOCK(tsdn, *arena->pa_shard.stats_mtx);
     }
 }
 
diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h
index 82996b8b..129a8fef 100644
--- a/include/jemalloc/internal/arena_stats.h
+++ b/include/jemalloc/internal/arena_stats.h
@@ -61,12 +61,6 @@ struct arena_stats_extents_s {
 typedef struct arena_stats_s arena_stats_t;
 struct arena_stats_s {
     LOCKEDINT_MTX_DECLARE(mtx)
-
-    /*
-     * Number of bytes currently mapped, excluding retained memory.
-     */
-    locked_zu_t mapped; /* Partially derived. */
-
     /*
      * Number of unused virtual memory bytes currently retained. Retained
      * bytes are technically mapped (though always decommitted or purged),
@@ -135,12 +129,4 @@ arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
     LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
 }
 
-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
-    LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
-    locked_inc_zu(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
-        &arena_stats->mapped, size);
-    LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
-}
-
 #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index a7c57896..61b6f42c 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -35,6 +35,14 @@ typedef struct pa_shard_stats_s pa_shard_stats_t;
 struct pa_shard_stats_s {
     pa_shard_decay_stats_t decay_dirty;
     pa_shard_decay_stats_t decay_muzzy;
+    /*
+     * Number of bytes currently mapped, excluding retained memory.
+     *
+     * Partially derived -- we maintain our own counter, but add in the
+     * base's own counter at merge.
+     */
+    locked_zu_t mapped;
+
     /* VM space had to be leaked (undocumented). Normally 0. */
     atomic_zu_t abandoned_vm;
 };
@@ -60,12 +68,21 @@ struct pa_shard_s {
     /* Extent serial number generator state. */
     atomic_zu_t extent_sn_next;
 
+    malloc_mutex_t *stats_mtx;
     pa_shard_stats_t *stats;
 };
 
+static inline void
+pa_shard_stats_mapped_add(tsdn_t *tsdn, pa_shard_t *shard, size_t size) {
+    LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
+    locked_inc_zu(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+        &shard->stats->mapped, size);
+    LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
+}
+
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats);
+    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/src/arena.c b/src/arena.c
index 8f306604..2f626fed 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -95,9 +95,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 
     LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 
-    locked_inc_zu_unsynchronized(&astats->mapped, base_mapped
-        + locked_read_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-        &arena->stats.mapped));
+    locked_inc_zu_unsynchronized(&astats->pa_shard_stats.mapped,
+        base_mapped + locked_read_zu(tsdn,
+        LOCKEDINT_MTX(*arena->pa_shard.stats_mtx),
+        &arena->pa_shard.stats->mapped));
     locked_inc_zu_unsynchronized(&astats->retained,
         ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
 
@@ -488,7 +489,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 
         if (mapped_add != 0) {
             locked_inc_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-                &arena->stats.mapped, mapped_add);
+                &arena->pa_shard.stats->mapped, mapped_add);
         }
         LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
@@ -919,7 +920,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
             &decay->stats->purged, npurged);
         locked_dec_zu(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-            &arena->stats.mapped, nunmapped << LG_PAGE);
+            &arena->pa_shard.stats->mapped, nunmapped << LG_PAGE);
         LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
 
@@ -1240,7 +1241,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         true, szind, &zero);
 
     if (config_stats && slab != NULL) {
-        arena_stats_mapped_add(tsdn, &arena->stats,
+        pa_shard_stats_mapped_add(tsdn, &arena->pa_shard,
             bin_info->slab_size);
     }
 
@@ -2039,7 +2040,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     }
 
     if (pa_shard_init(tsdn, &arena->pa_shard, base, ind,
-        &arena->stats.pa_shard_stats)) {
+        &arena->stats.pa_shard_stats, LOCKEDINT_MTX(arena->stats.mtx))) {
         goto label_error;
     }
 
diff --git a/src/ctl.c b/src/ctl.c
index 26d86da0..122856c0 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -861,8 +861,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
     ctl_arena_stats_t *astats = ctl_arena->astats;
 
     if (!destroyed) {
-        ctl_accum_locked_zu(&sdstats->astats.mapped,
-            &astats->astats.mapped);
+        ctl_accum_locked_zu(
+            &sdstats->astats.pa_shard_stats.mapped,
+            &astats->astats.pa_shard_stats.mapped);
         ctl_accum_locked_zu(&sdstats->astats.retained,
             &astats->astats.retained);
         ctl_accum_atomic_zu(&sdstats->astats.edata_avail,
@@ -1103,7 +1104,7 @@ ctl_refresh(tsdn_t *tsdn) {
         ctl_stats->resident = atomic_load_zu(
             &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
         ctl_stats->mapped = locked_read_atomic_zu(
-            &ctl_sarena->astats->astats.mapped);
+            &ctl_sarena->astats->astats.pa_shard_stats.mapped);
         ctl_stats->retained = locked_read_atomic_zu(
             &ctl_sarena->astats->astats.retained);
 
@@ -2914,8 +2915,8 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
 CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.mapped),
-    size_t)
+    locked_read_atomic_zu(&arenas_i(
+    mib[2])->astats->astats.pa_shard_stats.mapped), size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     locked_read_atomic_zu(&arenas_i(mib[2])->astats->astats.retained),
     size_t)
diff --git a/src/large.c b/src/large.c
index fa03a50e..57bf6748 100644
--- a/src/large.c
+++ b/src/large.c
@@ -151,7 +151,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     emap_remap(tsdn, &emap_global, edata, szind, false);
 
     if (config_stats && new_mapping) {
-        arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+        pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, trailsize);
     }
 
     if (zero) {
diff --git a/src/pa.c b/src/pa.c
index 35d3335f..e4dbb040 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -3,7 +3,7 @@
 
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats) {
+    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
     /* This will change eventually, but for now it should hold. */
     assert(base_ind_get(base) == ind);
     /*
@@ -44,6 +44,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 
     atomic_store_zu(&shard->extent_sn_next, 0, ATOMIC_RELAXED);
 
+    shard->stats_mtx = stats_mtx;
     shard->stats = stats;
     memset(shard->stats, 0, sizeof(*shard->stats));
 
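
Reviewer note (not part of the patch): the shape of the change is that the mapped counter moves into pa_shard_stats_t while the mutex that guards it stays in arena_stats_t. pa_shard_t therefore carries a borrowed stats_mtx pointer, which pa_shard_init() receives from arena_new() as LOCKEDINT_MTX(arena->stats.mtx), and pa_shard_stats_mapped_add() locks through that pointer instead of the old arena_stats_mapped_add(). The stand-alone C sketch below models only that ownership split, with pthread mutexes standing in for malloc_mutex_t/LOCKEDINT_MTX; every name in it is illustrative, not a jemalloc API.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for pa_shard_stats_t: the counter lives here after the change. */
typedef struct {
    size_t mapped;              /* guarded by the owner's stats mutex */
} shard_stats_t;

/* Stand-in for arena_stats_t: the owner keeps the mutex and embeds the stats. */
typedef struct {
    pthread_mutex_t mtx;
    shard_stats_t pa_shard_stats;
} owner_stats_t;

/* Stand-in for pa_shard_t: both pointers are borrowed from the owner. */
typedef struct {
    pthread_mutex_t *stats_mtx; /* like shard->stats_mtx */
    shard_stats_t *stats;       /* like shard->stats */
} shard_t;

/* Loosely mirrors pa_shard_init(): the owner hands in stats and their mutex. */
static void
shard_init(shard_t *shard, shard_stats_t *stats, pthread_mutex_t *stats_mtx) {
    shard->stats_mtx = stats_mtx;
    shard->stats = stats;
    shard->stats->mapped = 0;
}

/* Loosely mirrors pa_shard_stats_mapped_add(): lock via the borrowed mutex. */
static void
shard_stats_mapped_add(shard_t *shard, size_t size) {
    pthread_mutex_lock(shard->stats_mtx);
    shard->stats->mapped += size;
    pthread_mutex_unlock(shard->stats_mtx);
}

int
main(void) {
    owner_stats_t owner = { .mtx = PTHREAD_MUTEX_INITIALIZER };
    shard_t shard;

    /* Same wiring as arena_new() passing LOCKEDINT_MTX(arena->stats.mtx). */
    shard_init(&shard, &owner.pa_shard_stats, &owner.mtx);
    shard_stats_mapped_add(&shard, 4096);

    printf("mapped = %zu\n", owner.pa_shard_stats.mapped);
    return 0;
}

Because the shard reuses the arena's existing stats mutex rather than getting its own, readers such as stats_arenas_i_mapped keep serializing on the same lock during the transition, which is presumably why the patch passes LOCKEDINT_MTX(arena->stats.mtx) into pa_shard_init() instead of adding a second mutex.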