PA: Add a stats type.

This commit is contained in:
David Goldblatt 2020-03-08 13:08:15 -07:00 committed by David Goldblatt
parent 688fb3eb89
commit 32cb7c2f0b
6 changed files with 35 additions and 11 deletions

View File

@@ -4,6 +4,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
@@ -112,8 +113,12 @@ struct arena_stats_s {
arena_stats_u64_t nflushes_large; /* Derived. */
arena_stats_u64_t nrequests_large; /* Derived. */
/* VM space had to be leaked (undocumented). Normally 0. */
atomic_zu_t abandoned_vm;
/*
* The stats logically owned by the pa_shard in the same arena. This
* lives here only because it's convenient for the purposes of the ctl
* module -- it only knows about the single arena_stats.
*/
pa_shard_stats_t pa_shard_stats;
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */

View File

@@ -1,11 +1,20 @@
#ifndef JEMALLOC_INTERNAL_PA_H
#define JEMALLOC_INTERNAL_PA_H
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
/*
* The page allocator; responsible for acquiring pages of memory for
* allocations.
*/
/*
 * Statistics logically owned by a pa_shard.  The backing storage lives in
 * arena_stats (as pa_shard_stats) purely for the convenience of the ctl
 * module, which only knows about the single arena_stats.
 */
typedef struct pa_shard_stats_s pa_shard_stats_t;
struct pa_shard_stats_s {
/* VM space had to be leaked (undocumented). Normally 0. */
atomic_zu_t abandoned_vm;
};
typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s {
/*
@@ -20,9 +29,12 @@ struct pa_shard_s {
/* The source of edata_t objects. */
edata_cache_t edata_cache;
pa_shard_stats_t *stats;
};
/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind);
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
pa_shard_stats_t *stats);
#endif /* JEMALLOC_INTERNAL_PA_H */

View File

@@ -131,8 +131,9 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
(((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
ecache_npages_get(&arena->pa_shard.ecache_dirty) +
ecache_npages_get(&arena->pa_shard.ecache_muzzy)) << LG_PAGE)));
arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
&arena->stats.abandoned_vm, ATOMIC_RELAXED));
arena_stats_accum_zu(&astats->pa_shard_stats.abandoned_vm,
atomic_load_zu(&arena->stats.pa_shard_stats.abandoned_vm,
ATOMIC_RELAXED));
for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
@@ -2027,7 +2028,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
if (pa_shard_init(tsdn, &arena->pa_shard, base, ind)) {
if (pa_shard_init(tsdn, &arena->pa_shard, base, ind,
&arena->stats.pa_shard_stats)) {
goto label_error;
}

View File

@@ -939,8 +939,8 @@ MUTEX_PROF_ARENA_MUTEXES
&astats->astats.nrequests_large);
ctl_accum_arena_stats_u64(&sdstats->astats.nflushes_large,
&astats->astats.nflushes_large);
accum_atomic_zu(&sdstats->astats.abandoned_vm,
&astats->astats.abandoned_vm);
accum_atomic_zu(&sdstats->astats.pa_shard_stats.abandoned_vm,
&astats->astats.pa_shard_stats.abandoned_vm);
accum_atomic_zu(&sdstats->astats.tcache_bytes,
&astats->astats.tcache_bytes);
@@ -2962,7 +2962,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
atomic_load_zu(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,

View File

@@ -199,7 +199,7 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool growing_retained) {
size_t sz = edata_size_get(edata);
if (config_stats) {
arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
arena_stats_accum_zu(&arena->pa_shard.stats->abandoned_vm, sz);
}
/*
* Leak extent after making sure its pages have already been purged, so

View File

@@ -2,7 +2,8 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind) {
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
pa_shard_stats_t *stats) {
/* This will change eventually, but for now it should hold. */
assert(base_ind_get(base) == ind);
/*
@@ -37,5 +38,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind) {
return true;
}
shard->stats = stats;
memset(shard->stats, 0, sizeof(*shard->stats));
return false;
}