HPA: Add purge stats.

David Goldblatt 2020-12-03 16:09:50 -08:00 committed by David Goldblatt
parent 746ea3de6f
commit b25ee5d88e
4 changed files with 97 additions and 7 deletions

@ -9,12 +9,40 @@
typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
/*
- * The number of times we've purged a hugepage. Each eviction purges a
- * single hugepage.
* The number of times we've fully purged a hugepage and evicted it from
* the psset.
*
- * Guarded by the grow mutex.
* Guarded by grow_mtx.
*/
uint64_t nevictions;
/*
* The number of purge passes we've performed; each pass purges within a single hugepage.
*
* Guarded by mtx.
*/
uint64_t npurge_passes;
/*
* The number of individual purge calls we perform (which should always
* be at least as large as npurge_passes, since each pass purges at
* least one extent within a hugepage).
*
* Guarded by mtx.
*/
uint64_t npurges;
/*
* The number of times we've hugified a pageslab.
*
* Guarded by mtx.
*/
uint64_t nhugifies;
/*
* The number of times we've dehugified a pageslab.
*
* Guarded by mtx.
*/
uint64_t ndehugifies;
};
/* Completely derived; only used by CTL. */

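Taken together, the comments above pin down how the new counters relate: every purge pass issues at least one individual purge call, and a pageslab can only be dehugified after it has been hugified. A minimal sketch of those invariants, using a hypothetical standalone mirror of the struct (illustration only, not part of the commit):

#include <assert.h>
#include <stdint.h>

/* Hypothetical mirror of hpa_shard_nonderived_stats_t, for illustration. */
typedef struct {
	uint64_t nevictions;    /* hugepages fully purged and evicted from the psset */
	uint64_t npurge_passes; /* purge passes over hugepages */
	uint64_t npurges;       /* individual purge calls issued */
	uint64_t nhugifies;     /* pageslabs hugified */
	uint64_t ndehugifies;   /* pageslabs dehugified */
} purge_stats_sketch_t;

static void
purge_stats_sketch_check(const purge_stats_sketch_t *s) {
	/* Each pass purges at least one extent within a hugepage. */
	assert(s->npurges >= s->npurge_passes);
	/* Only a previously hugified pageslab can be dehugified. */
	assert(s->ndehugifies <= s->nhugifies);
}

int
main(void) {
	/* Made-up snapshot, just to exercise the checks. */
	purge_stats_sketch_t s = {2, 10, 14, 3, 1};
	purge_stats_sketch_check(&s);
	return 0;
}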
@ -227,6 +227,10 @@ CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_hpa_shard_nevictions)
CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ninactive_huge)
@ -695,7 +699,12 @@ static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
stats_arenas_i_hpa_shard_full_slabs)},
{NAME("nonfull_slabs"), CHILD(indexed,
stats_arenas_i_hpa_shard_nonfull_slabs)},
{NAME("nevictions"), CTL(stats_arenas_i_hpa_shard_nevictions)}
{NAME("nevictions"), CTL(stats_arenas_i_hpa_shard_nevictions)},
{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
};
static const ctl_named_node_t stats_arenas_i_node[] = {
@ -3507,6 +3516,14 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nevictions,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.nevictions, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
/* Full, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,

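With the ctl entries above registered, the counters become readable through the usual mallctl interface as stats.arenas.<i>.hpa_shard.npurge_passes and friends. A hedged usage sketch (assumes arena index 0, an unprefixed jemalloc build, and stats enabled; not part of the commit):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Advance the epoch so the stats snapshot is refreshed before reading. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	const char *names[] = {
		"stats.arenas.0.hpa_shard.nevictions",
		"stats.arenas.0.hpa_shard.npurge_passes",
		"stats.arenas.0.hpa_shard.npurges",
		"stats.arenas.0.hpa_shard.nhugifies",
		"stats.arenas.0.hpa_shard.ndehugifies",
	};
	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		uint64_t val;
		size_t sz = sizeof(val);
		if (mallctl(names[i], &val, &sz, NULL, 0) == 0) {
			printf("%s: %" PRIu64 "\n", names[i], val);
		}
	}
	return 0;
}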
@ -74,7 +74,12 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
shard->eden_len = 0;
shard->ind = ind;
shard->emap = emap;
shard->stats.nevictions = 0;
shard->stats.npurge_passes = 0;
shard->stats.npurges = 0;
shard->stats.nhugifies = 0;
shard->stats.ndehugifies = 0;
/*
* Fill these in last, so that if an hpa_shard gets used despite
@ -99,6 +104,10 @@ static void
hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
hpa_shard_nonderived_stats_t *src) {
dst->nevictions += src->nevictions;
dst->npurge_passes += src->npurge_passes;
dst->npurges += src->npurges;
dst->nhugifies += src->nhugifies;
dst->ndehugifies += src->ndehugifies;
}
void
@ -237,6 +246,7 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
/* Do the metadata update bit while holding the lock. */
hpdata_purge_state_t purge_state;
hpdata_purge_begin(ps, &purge_state);
shard->stats.npurge_passes++;
/*
* Dehugifying can only happen on the first loop iteration,
@ -247,6 +257,7 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
bool needs_dehugify = false;
if (hpdata_huge_get(ps)) {
needs_dehugify = true;
shard->stats.ndehugifies++;
hpdata_dehugify(ps);
}
@ -258,16 +269,19 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
}
size_t total_purged = 0;
uint64_t purges_this_pass = 0;
void *purge_addr;
size_t purge_size;
while (hpdata_purge_next(ps, &purge_state, &purge_addr,
&purge_size)) {
purges_this_pass++;
pages_purge_forced(purge_addr, purge_size);
total_purged += purge_size;
}
/* Reacquire to finish our metadata update. */
malloc_mutex_lock(tsdn, &shard->mtx);
shard->stats.npurges += purges_this_pass;
hpdata_purge_end(ps, &purge_state);
assert(total_purged <= HUGEPAGE);
@ -357,6 +371,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
bool hugify = hpa_should_hugify(shard, ps);
if (hugify) {
hpdata_hugify_begin(ps);
shard->stats.nhugifies++;
}
psset_insert(&shard->psset, ps);

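The hpa_purge() changes above also show where each counter is bumped: npurge_passes and ndehugifies are incremented while the shard mutex is still held, but the individual purge calls happen with the mutex dropped, so they are tallied in a local (purges_this_pass) and folded into shard->stats.npurges only after the lock is reacquired. A distilled sketch of that count-outside-the-lock pattern (hypothetical names, not the commit's code):

#include <pthread.h>
#include <stdint.h>

/* Hypothetical shard-like holder of a mutex-guarded counter. */
typedef struct {
	pthread_mutex_t mtx;
	uint64_t npurges;
} shard_sketch_t;

/* Hypothetical stand-in for "is there another extent to purge?". */
static int
purge_next(int *remaining) {
	return (*remaining)-- > 0;
}

static void
purge_pass(shard_sketch_t *shard) {
	pthread_mutex_lock(&shard->mtx);
	/* Metadata updates would happen here, under the lock. */
	pthread_mutex_unlock(&shard->mtx);

	/* Tally locally while the lock is dropped around the slow purge calls. */
	uint64_t purges_this_pass = 0;
	int remaining = 3;
	while (purge_next(&remaining)) {
		purges_this_pass++;
	}

	/* Reacquire the lock and fold the local count into the shared stat. */
	pthread_mutex_lock(&shard->mtx);
	shard->npurges += purges_this_pass;
	pthread_mutex_unlock(&shard->mtx);
}

int
main(void) {
	shard_sketch_t shard;
	pthread_mutex_init(&shard.mtx, NULL);
	shard.npurges = 0;
	purge_pass(&shard);
	pthread_mutex_destroy(&shard.mtx);
	return 0;
}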
@ -791,6 +791,21 @@ stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_row_init(&row);
uint64_t nevictions;
uint64_t npurge_passes;
uint64_t npurges;
uint64_t nhugifies;
uint64_t ndehugifies;
CTL_M2_GET("stats.arenas.0.hpa_shard.nevictions",
i, &nevictions, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
i, &npurge_passes, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
i, &npurges, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
i, &nhugifies, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
i, &ndehugifies, uint64_t);
size_t npageslabs_huge;
size_t nactive_huge;
@ -800,9 +815,6 @@ stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
size_t nactive_nonhuge;
size_t ninactive_nonhuge;
CTL_M2_GET("stats.arenas.0.hpa_shard.nevictions",
i, &nevictions, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
i, &npageslabs_huge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
@ -825,17 +837,35 @@ stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_table_printf(emitter,
"HPA shard stats:\n"
" Evictions: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
"\n"
" In full slabs:\n"
" npageslabs: %zu huge, %zu nonhuge\n"
" nactive: %zu huge, %zu nonhuge \n"
" ninactive: %zu huge, %zu nonhuge \n",
nevictions, rate_per_second(nevictions, uptime),
npurge_passes, rate_per_second(npurge_passes, uptime),
npurges, rate_per_second(npurges, uptime),
nhugifies, rate_per_second(nhugifies, uptime),
ndehugifies, rate_per_second(ndehugifies, uptime),
npageslabs_huge, npageslabs_nonhuge,
nactive_huge, nactive_nonhuge,
ninactive_huge, ninactive_nonhuge);
emitter_json_object_kv_begin(emitter, "hpa_shard");
emitter_json_kv(emitter, "nevictions", emitter_type_uint64,
&nevictions);
emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
&npurge_passes);
emitter_json_kv(emitter, "npurges", emitter_type_uint64,
&npurges);
emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
&nhugifies);
emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
&ndehugifies);
emitter_json_object_kv_begin(emitter, "full_slabs");
emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
&npageslabs_huge);