HPA: Add an nevictions counter.

I.e. the number of times we've purged a hugepage-sized region.
Author: David Goldblatt, 2020-11-30 19:06:50 -08:00 (committed by David Goldblatt)
parent fffcefed33
commit 3ed0b4e8a3
4 changed files with 39 additions and 10 deletions
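
For context (not part of the commit itself): the counter added here is exported through the new "stats.arenas.<i>.hpa_shard.nevictions" mallctl wired up below. A minimal sketch of polling it from application code, assuming an unprefixed, stats-enabled jemalloc build with the HPA active, and using arena 0 purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Refresh jemalloc's stats snapshot before reading any counters. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	if (mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)) != 0) {
		return 1;
	}

	/* Read the per-arena HPA eviction count (arena 0 as an example). */
	uint64_t nevictions;
	sz = sizeof(nevictions);
	if (mallctl("stats.arenas.0.hpa_shard.nevictions", &nevictions, &sz,
	    NULL, 0) != 0) {
		/* Stats disabled, or this arena has no HPA shard. */
		return 1;
	}
	printf("hpa_shard nevictions: %" PRIu64 "\n", nevictions);
	return 0;
}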

include/jemalloc/internal/hpa.h

@@ -6,10 +6,12 @@
 #include "jemalloc/internal/pai.h"
 #include "jemalloc/internal/psset.h"
 
-/* Used only by CTL; not actually stored here (i.e., all derived). */
+/* Completely derived; only used by CTL. */
 typedef struct hpa_shard_stats_s hpa_shard_stats_t;
 struct hpa_shard_stats_s {
 	psset_stats_t psset_stats;
+	/* The stat version of the nevictions counter. */
+	uint64_t nevictions;
 };
 
 typedef struct hpa_shard_s hpa_shard_t;
@@ -69,6 +71,14 @@ struct hpa_shard_s {
 	/* The arena ind we're associated with. */
 	unsigned ind;
 	emap_t *emap;
+
+	/*
+	 * The number of times we've purged a hugepage. Each eviction purges a
+	 * single hugepage.
+	 *
+	 * Guarded by the grow mutex.
+	 */
+	uint64_t nevictions;
 };
 
 /*

src/ctl.c

@@ -220,6 +220,7 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
 CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
 CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
 INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_hpa_shard_nevictions)
 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
 CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ninactive_huge)
@@ -655,7 +656,8 @@ static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
 	{NAME("full_slabs"), CHILD(named,
 	    stats_arenas_i_hpa_shard_full_slabs)},
 	{NAME("nonfull_slabs"), CHILD(indexed,
-	    stats_arenas_i_hpa_shard_nonfull_slabs)}
+	    stats_arenas_i_hpa_shard_nonfull_slabs)},
+	{NAME("nevictions"), CTL(stats_arenas_i_hpa_shard_nevictions)}
 };
 
 static const ctl_named_node_t stats_arenas_i_node[] = {
@@ -3372,6 +3374,9 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
 	return super_stats_arenas_i_extents_j_node;
 }
 
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nevictions,
+    arenas_i(mib[2])->astats->hpastats.nevictions, uint64_t);
+
 /* Full, huge */
 CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
     arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs.npageslabs_huge,

src/hpa.c

@@ -74,6 +74,7 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
 	shard->eden_len = 0;
 	shard->ind = ind;
 	shard->emap = emap;
+	shard->nevictions = 0;
 
 	/*
 	 * Fill these in last, so that if an hpa_shard gets used despite
@@ -97,14 +98,18 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
 void
 hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
 	psset_stats_accum(&dst->psset_stats, &src->psset_stats);
+	dst->nevictions += src->nevictions;
 }
 
 void
 hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
     hpa_shard_stats_t *dst) {
+	malloc_mutex_lock(tsdn, &shard->grow_mtx);
 	malloc_mutex_lock(tsdn, &shard->mtx);
 	psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
+	dst->nevictions += shard->nevictions;
 	malloc_mutex_unlock(tsdn, &shard->mtx);
+	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 }
 
 static hpdata_t *
@@ -238,6 +243,7 @@ hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
 	hpa_dehugify(ps);
 
 	malloc_mutex_lock(tsdn, &shard->grow_mtx);
+	shard->nevictions++;
 	hpdata_list_prepend(&shard->unused_slabs, ps);
 	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 }
@@ -353,6 +359,7 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
 	malloc_mutex_lock(tsdn, &shard->mtx);
 	edata = edata_cache_small_get(tsdn, &shard->ecs);
 	if (edata == NULL) {
+		shard->nevictions++;
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 		hpa_handle_ps_eviction(tsdn, shard, ps);
@@ -371,11 +378,8 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
 		hpdata_unreserve(ps, edata_addr_get(edata),
 		    edata_size_get(edata));
 		edata_cache_small_put(tsdn, &shard->ecs, edata);
-		/*
-		 * Technically the same as fallthrough at the time of this
-		 * writing, but consistent with the error handling in the rest
-		 * of the function.
-		 */
+		shard->nevictions++;
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 		hpa_handle_ps_eviction(tsdn, shard, ps);

src/stats.c

@@ -661,12 +661,14 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
 }
 
 static void
-stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i) {
+stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
 	emitter_row_t header_row;
 	emitter_row_init(&header_row);
 	emitter_row_t row;
 	emitter_row_init(&row);
 
+	uint64_t nevictions;
+
 	size_t npageslabs_huge;
 	size_t nactive_huge;
 	size_t ninactive_huge;
@@ -675,6 +677,9 @@ stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i) {
 	size_t nactive_nonhuge;
 	size_t ninactive_nonhuge;
 
+	CTL_M2_GET("stats.arenas.0.hpa_shard.nevictions",
+	    i, &nevictions, uint64_t);
+
 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
 	    i, &npageslabs_huge, size_t);
 	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
@@ -696,13 +701,18 @@ stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i) {
 	emitter_table_printf(emitter,
 	    "HPA shard stats:\n"
+	    "  Evictions: %" FMTu64 " (%" FMTu64 " / sec)\n"
 	    "  In full slabs:\n"
 	    "      npageslabs: %zu huge, %zu nonhuge\n"
 	    "      nactive: %zu huge, %zu nonhuge \n"
 	    "      ninactive: %zu huge, %zu nonhuge \n",
-	    npageslabs_huge, npageslabs_nonhuge, nactive_huge, nactive_nonhuge,
+	    nevictions, rate_per_second(nevictions, uptime),
+	    npageslabs_huge, npageslabs_nonhuge,
+	    nactive_huge, nactive_nonhuge,
 	    ninactive_huge, ninactive_nonhuge);
 
 	emitter_json_object_kv_begin(emitter, "hpa_shard");
+	emitter_json_kv(emitter, "nevictions", emitter_type_uint64,
+	    &nevictions);
 	emitter_json_object_kv_begin(emitter, "full_slabs");
 	emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
 	    &npageslabs_huge);
@@ -1137,7 +1147,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
 		stats_arena_extents_print(emitter, i);
 	}
 	if (hpa) {
-		stats_arena_hpa_shard_print(emitter, i);
+		stats_arena_hpa_shard_print(emitter, i, uptime);
 	}
 }
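
Also for context (not part of the commit itself): the new counter shows up in malloc_stats_print() output as well, as the "Evictions" table line printed above and as an "nevictions" key under each arena's "hpa_shard" JSON object. A small sketch that dumps the JSON form, under the same assumptions as the earlier example:

#include <jemalloc/jemalloc.h>

int
main(void) {
	/*
	 * "J" selects JSON output; with this change the "hpa_shard" object
	 * for each arena carries "nevictions" alongside the existing
	 * "full_slabs"/"nonfull_slabs" objects.
	 */
	malloc_stats_print(NULL, NULL, "J");
	return 0;
}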