psset: Move in stats.

A later change will benefit from having these functions pulled into a
psset-module set of functions.
This commit is contained in:
David Goldblatt 2020-11-10 16:23:03 -08:00 committed by David Goldblatt
parent d0a991d47b
commit c1b2a77933
7 changed files with 82 additions and 63 deletions

View File

@ -35,8 +35,7 @@ struct hpa_s {
/* Used only by CTL; not actually stored here (i.e., all derived). */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
psset_bin_stats_t psset_full_slab_stats;
psset_bin_stats_t psset_slab_stats[PSSET_NPSIZES];
psset_stats_t psset_stats;
};
typedef struct hpa_shard_s hpa_shard_t;
@ -89,6 +88,9 @@ bool hpa_init(hpa_t *hpa, base_t *base, emap_t *emap,
bool hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa,
edata_cache_t *edata_cache, unsigned ind, size_t ps_goal,
size_t ps_alloc_max, size_t small_max, size_t large_min);
void hpa_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst);
/*
* Notify the shard that we won't use it for allocations much longer. Due to
* the possibility of races, we don't actually prevent allocations; just flush

View File

@ -31,12 +31,16 @@ struct psset_bin_stats_s {
size_t ninactive;
};
/*
 * Add each counter of src into the corresponding counter of dst.
 * Pure arithmetic on two stats objects; no locking is done here.
 */
static inline void
psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
dst->npageslabs += src->npageslabs;
dst->nactive += src->nactive;
dst->ninactive += src->ninactive;
}
/* Used only by CTL; not actually stored here (i.e., all derived). */
typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
/*
 * Full slabs don't live in any edata heap. But we still track their
 * stats.
 */
psset_bin_stats_t full_slabs;
/* Per size class, stats for slabs that still have room to allocate. */
psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES];
};
typedef struct psset_s psset_t;
struct psset_s {
@ -46,18 +50,14 @@ struct psset_s {
*/
edata_age_heap_t pageslabs[PSSET_NPSIZES];
bitmap_t bitmap[BITMAP_GROUPS(PSSET_NPSIZES)];
/*
* Full slabs don't live in any edata heap. But we still track their
* stats.
*/
psset_bin_stats_t full_slab_stats;
psset_bin_stats_t slab_stats[PSSET_NPSIZES];
psset_stats_t stats;
/* How many alloc_new calls have happened? */
uint64_t age_counter;
};
void psset_init(psset_t *psset);
void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);
void psset_insert(psset_t *psset, edata_t *ps);
void psset_remove(psset_t *psset, edata_t *ps);

View File

@ -1104,14 +1104,7 @@ MUTEX_PROF_ARENA_MUTEXES
}
/* Merge HPA stats. */
psset_bin_stats_accum(&sdstats->hpastats.psset_full_slab_stats,
&astats->hpastats.psset_full_slab_stats);
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
psset_bin_stats_accum(
&sdstats->hpastats.psset_slab_stats[i],
&astats->hpastats.psset_slab_stats[i]);
}
hpa_stats_accum(&sdstats->hpastats, &astats->hpastats);
sec_stats_accum(&sdstats->secstats, &astats->secstats);
}
}
@ -3375,21 +3368,21 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs,
arenas_i(mib[2])->astats->hpastats.psset_full_slab_stats.npageslabs,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs.npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive,
arenas_i(mib[2])->astats->hpastats.psset_full_slab_stats.nactive, size_t);
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs.nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ninactive,
arenas_i(mib[2])->astats->hpastats.psset_full_slab_stats.ninactive, size_t);
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs.ninactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs,
arenas_i(mib[2])->astats->hpastats.psset_slab_stats[mib[5]].npageslabs,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive,
arenas_i(mib[2])->astats->hpastats.psset_slab_stats[mib[5]].nactive,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]].nactive,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ninactive,
arenas_i(mib[2])->astats->hpastats.psset_slab_stats[mib[5]].ninactive,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]].ninactive,
size_t);
static const ctl_named_node_t *

View File

@ -89,6 +89,24 @@ hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
return false;
}
/*
* Note that the stats functions here follow the usual stats naming conventions;
 * "merge" obtains the stats from some live object instance, while "accum"
 * only combines the stats from one stats object to another. Hence the lack of
* locking here.
*/
/*
 * Combine the psset stats of src into dst. Operates only on stats
 * objects (no live shard state), so no lock is taken.
 */
void
hpa_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
psset_stats_accum(&dst->psset_stats, &src->psset_stats);
}
/*
 * Snapshot the live psset stats of shard into dst. Holds shard->mtx
 * across the read so the snapshot is internally consistent.
 */
void
hpa_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst) {
malloc_mutex_lock(tsdn, &shard->mtx);
psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
static edata_t *
hpa_alloc_central(tsdn_t *tsdn, hpa_shard_t *shard, size_t size_min,
size_t size_goal) {
@ -415,10 +433,10 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
PAGE);
malloc_mutex_unlock(tsdn, &shard->mtx);
assert(psset_empty);
hpa_shard_assert_stats_empty(&shard->psset.full_slab_stats);
hpa_shard_assert_stats_empty(&shard->psset.stats.full_slabs);
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
hpa_shard_assert_stats_empty(
&shard->psset.slab_stats[i]);
&shard->psset.stats.nonfull_slabs[i]);
}
}
}

View File

@ -150,15 +150,7 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
}
if (shard->ever_used_hpa) {
malloc_mutex_lock(tsdn, &shard->hpa_shard.mtx);
psset_bin_stats_accum(&hpa_stats_out->psset_full_slab_stats,
&shard->hpa_shard.psset.full_slab_stats);
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
psset_bin_stats_accum(
&hpa_stats_out->psset_slab_stats[i],
&shard->hpa_shard.psset.slab_stats[i]);
}
malloc_mutex_unlock(tsdn, &shard->hpa_shard.mtx);
hpa_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
}
}

View File

@ -14,17 +14,26 @@ psset_init(psset_t *psset) {
edata_age_heap_new(&psset->pageslabs[i]);
}
bitmap_init(psset->bitmap, &psset_bitmap_info, /* fill */ true);
psset->full_slab_stats.npageslabs = 0;
psset->full_slab_stats.nactive = 0;
psset->full_slab_stats.ninactive = 0;
for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
psset->slab_stats[i].npageslabs = 0;
psset->slab_stats[i].nactive = 0;
psset->slab_stats[i].ninactive = 0;
}
memset(&psset->stats, 0, sizeof(psset->stats));
psset->age_counter = 0;
}
/* Fold each counter of src into the matching counter of dst. */
static void
psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
dst->ninactive += src->ninactive;
dst->nactive += src->nactive;
dst->npageslabs += src->npageslabs;
}
/*
 * Accumulate src into dst: every nonfull size-class bin, plus the
 * (imaginary) full-slabs bin. Each bin is independent, so order does
 * not matter.
 */
void
psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
psset_bin_stats_accum(&dst->nonfull_slabs[pind],
&src->nonfull_slabs[pind]);
}
psset_bin_stats_accum(&dst->full_slabs, &src->full_slabs);
}
/*
* The stats maintenance strategy is simple, but not necessarily obvious.
* edata_nfree and the bitmap must remain consistent at all times. If they
@ -50,13 +59,15 @@ psset_bin_stats_adjust(psset_bin_stats_t *binstats, edata_t *ps, bool inc) {
/*
 * Remove ps from the age heap for size class pind, and back its
 * contribution out of that bin's stats so the two stay in sync.
 */
static void
psset_edata_heap_remove(psset_t *psset, pszind_t pind, edata_t *ps) {
edata_age_heap_remove(&psset->pageslabs[pind], ps);
psset_bin_stats_adjust(&psset->stats.nonfull_slabs[pind], ps,
/* inc */ false);
}
/*
 * Insert ps into the age heap for size class pind, and add its
 * contribution to that bin's stats so the two stay in sync.
 */
static void
psset_edata_heap_insert(psset_t *psset, pszind_t pind, edata_t *ps) {
edata_age_heap_insert(&psset->pageslabs[pind], ps);
psset_bin_stats_adjust(&psset->stats.nonfull_slabs[pind], ps,
/* inc */ true);
}
JEMALLOC_ALWAYS_INLINE void
@ -75,7 +86,7 @@ psset_insert(psset_t *psset, edata_t *ps) {
* We don't need to track full slabs; just pretend to for stats
* purposes. See the comment at psset_bin_stats_adjust.
*/
psset_bin_stats_adjust(&psset->full_slab_stats, ps,
psset_bin_stats_adjust(&psset->stats.full_slabs, ps,
/* inc */ true);
return;
}
@ -96,7 +107,7 @@ psset_remove(psset_t *psset, edata_t *ps) {
size_t longest_free_range = edata_longest_free_range_get(ps);
if (longest_free_range == 0) {
psset_bin_stats_adjust(&psset->full_slab_stats, ps,
psset_bin_stats_adjust(&psset->stats.full_slabs, ps,
/* inc */ true);
return;
}
@ -214,7 +225,7 @@ psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
}
edata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
if (largest_unchosen_range == 0) {
psset_bin_stats_adjust(&psset->full_slab_stats, ps,
psset_bin_stats_adjust(&psset->stats.full_slabs, ps,
/* inc */ true);
} else {
psset_insert(psset, ps);
@ -265,15 +276,15 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
fb_unset_range(ps_fb, ps_npages, begin, len);
if (ps_old_longest_free_range == 0) {
/* We were in the (imaginary) full bin; update stats for it. */
psset_bin_stats_adjust(&psset->full_slab_stats, ps,
psset_bin_stats_adjust(&psset->stats.full_slabs, ps,
/* inc */ false);
} else {
/*
* The edata is still in the bin, need to update its
* contribution.
*/
psset->slab_stats[old_pind].nactive -= len;
psset->slab_stats[old_pind].ninactive += len;
psset->stats.nonfull_slabs[old_pind].nactive -= len;
psset->stats.nonfull_slabs[old_pind].ninactive += len;
}
/*
* Note that we want to do this after the stats updates, since if it was

View File

@ -307,14 +307,14 @@ stats_expect_empty(psset_bin_stats_t *stats) {
static void
stats_expect(psset_t *psset, size_t nactive) {
if (nactive == PAGESLAB_PAGES) {
expect_zu_eq(1, psset->full_slab_stats.npageslabs,
expect_zu_eq(1, psset->stats.full_slabs.npageslabs,
"Expected a full slab");
expect_zu_eq(PAGESLAB_PAGES, psset->full_slab_stats.nactive,
expect_zu_eq(PAGESLAB_PAGES, psset->stats.full_slabs.nactive,
"Should have exactly filled the bin");
expect_zu_eq(0, psset->full_slab_stats.ninactive,
expect_zu_eq(0, psset->stats.full_slabs.ninactive,
"Should never have inactive pages in a full slab");
} else {
stats_expect_empty(&psset->full_slab_stats);
stats_expect_empty(&psset->stats.full_slabs);
}
size_t ninactive = PAGESLAB_PAGES - nactive;
pszind_t nonempty_pind = PSSET_NPSIZES;
@ -324,14 +324,17 @@ stats_expect(psset_t *psset, size_t nactive) {
}
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
if (i == nonempty_pind) {
assert_zu_eq(1, psset->slab_stats[i].npageslabs,
assert_zu_eq(1,
psset->stats.nonfull_slabs[i].npageslabs,
"Should have found a slab");
expect_zu_eq(nactive, psset->slab_stats[i].nactive,
expect_zu_eq(nactive,
psset->stats.nonfull_slabs[i].nactive,
"Mismatch in active pages");
expect_zu_eq(ninactive, psset->slab_stats[i].ninactive,
expect_zu_eq(ninactive,
psset->stats.nonfull_slabs[i].ninactive,
"Mismatch in inactive pages");
} else {
stats_expect_empty(&psset->slab_stats[i]);
stats_expect_empty(&psset->stats.nonfull_slabs[i]);
}
}
}