PA->PAC: Move in stats.
commit 7391382349
parent db211eefbf
committed by David Goldblatt
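The diff is easier to follow with the intended ownership in mind: the page-allocator-classic (PAC) counters and a pointer to the arena's stats mutex now hang off pac_t, while pa_shard_stats_t keeps edata_avail at the top level and embeds the rest as pac_stats. The sketch below is reconstructed only from the fields this commit touches; it is not copied from the jemalloc headers, and member order and the elided fields are assumptions.

/*
 * Layout sketch implied by this commit (reconstructed, not the real headers).
 */
typedef struct pac_stats_s {
    pac_decay_stats_t decay_dirty;  /* npurge/nmadvise/purged, under stats_mtx */
    pac_decay_stats_t decay_muzzy;
    atomic_zu_t pac_mapped;         /* bytes mapped by the PAC; relaxed atomics */
    atomic_zu_t abandoned_vm;       /* VM given up in extents_abandon_vm() */
    size_t retained;                /* derived at stats-merge time */
} pac_stats_t;

typedef struct pa_shard_stats_s {
    size_t edata_avail;             /* still merged at the pa_shard level */
    pac_stats_t pac_stats;          /* everything else moved in here */
} pa_shard_stats_t;

typedef struct pac_s {
    /* ... ecaches, decay state, emap, edata_cache ... */
    pac_stats_t *stats;             /* supplied via pac_init() */
    malloc_mutex_t *stats_mtx;      /* shared with the arena stats mutex */
} pac_t;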
src/arena.c (18 lines changed)
@@ -80,7 +80,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
-    pa_extent_stats_t *estats) {
+    pac_estats_t *estats) {
     cassert(config_stats);

     arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -89,8 +89,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     size_t base_allocated, base_resident, base_mapped, metadata_thp;
     base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
         &base_mapped, &metadata_thp);
-    size_t pa_mapped = pa_shard_pa_mapped(&arena->pa_shard);
-    astats->mapped += base_mapped + pa_mapped;
+    size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
+    astats->mapped += base_mapped + pac_mapped_sz;
     astats->resident += base_resident;

     LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
@@ -423,7 +423,7 @@ arena_decide_unforced_decay_purge_setting(bool is_background_thread) {

 static bool
 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, ssize_t decay_ms) {
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, ssize_t decay_ms) {
     if (!decay_ms_valid(decay_ms)) {
         return true;
     }
@@ -454,7 +454,7 @@ bool
 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
     return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
-        &arena->pa_shard.stats->decay_dirty,
+        &arena->pa_shard.pac.stats->decay_dirty,
         &arena->pa_shard.pac.ecache_dirty, decay_ms);
 }

@@ -462,13 +462,13 @@ bool
 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
     return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
-        &arena->pa_shard.stats->decay_muzzy,
+        &arena->pa_shard.pac.stats->decay_muzzy,
         &arena->pa_shard.pac.ecache_muzzy, decay_ms);
 }

 static bool
 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     bool is_background_thread, bool all) {
     if (all) {
         malloc_mutex_lock(tsdn, &decay->mtx);
@@ -521,7 +521,7 @@ static bool
 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
     return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
-        &arena->pa_shard.stats->decay_dirty,
+        &arena->pa_shard.pac.stats->decay_dirty,
         &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
 }

@@ -532,7 +532,7 @@ arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
         return false;
     }
     return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
-        &arena->pa_shard.stats->decay_muzzy,
+        &arena->pa_shard.pac.stats->decay_muzzy,
         &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
 }

src/ctl.c (62 lines changed)
@@ -831,7 +831,7 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
         memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
             sizeof(arena_stats_large_t));
         memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
-            sizeof(pa_extent_stats_t));
+            sizeof(pac_estats_t));
     }
 }

@@ -889,32 +889,31 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,

     if (!destroyed) {
         sdstats->astats.mapped += astats->astats.mapped;
-        sdstats->astats.pa_shard_stats.retained
-            += astats->astats.pa_shard_stats.retained;
+        sdstats->astats.pa_shard_stats.pac_stats.retained
+            += astats->astats.pa_shard_stats.pac_stats.retained;
         sdstats->astats.pa_shard_stats.edata_avail
             += astats->astats.pa_shard_stats.edata_avail;
     }

     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.npurge,
-        &astats->astats.pa_shard_stats.decay_dirty.npurge);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.nmadvise,
-        &astats->astats.pa_shard_stats.decay_dirty.nmadvise);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.purged,
-        &astats->astats.pa_shard_stats.decay_dirty.purged);
-
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.npurge,
-        &astats->astats.pa_shard_stats.decay_muzzy.npurge);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.nmadvise,
-        &astats->astats.pa_shard_stats.decay_muzzy.nmadvise);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.purged,
-        &astats->astats.pa_shard_stats.decay_muzzy.purged);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);

 #define OP(mtx) malloc_mutex_prof_merge( \
         &(sdstats->astats.mutex_prof_data[ \
@@ -957,8 +956,8 @@ MUTEX_PROF_ARENA_MUTEXES
             += astats->astats.nrequests_large;
         sdstats->astats.nflushes_large += astats->astats.nflushes_large;
         ctl_accum_atomic_zu(
-            &sdstats->astats.pa_shard_stats.abandoned_vm,
-            &astats->astats.pa_shard_stats.abandoned_vm);
+            &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
+            &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);

         sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;

@@ -1117,8 +1116,8 @@ ctl_refresh(tsdn_t *tsdn) {
         ctl_stats->metadata_thp =
             ctl_sarena->astats->astats.metadata_thp;
         ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
-        ctl_stats->retained =
-            ctl_sarena->astats->astats.pa_shard_stats.retained;
+        ctl_stats->retained = ctl_sarena->astats->astats
+            .pa_shard_stats.pac_stats.retained;

         ctl_background_thread_stats_read(tsdn);

@@ -2976,35 +2975,34 @@ CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
     arenas_i(mib[2])->astats->astats.mapped, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
-    arenas_i(mib[2])->astats->astats.pa_shard_stats.retained,
-    size_t)
+    arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
     arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)

 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.npurge),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.nmadvise),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.purged),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
     uint64_t)

 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.npurge),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.nmadvise),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.purged),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
     uint64_t)

 CTL_RO_CGEN(config_stats, stats_arenas_i_base,
@@ -3022,7 +3020,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
     size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
     atomic_load_zu(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.abandoned_vm,
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
     ATOMIC_RELAXED), size_t)

 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,

src/extent.c (17 lines changed)
@@ -196,7 +196,7 @@ extents_abandon_vm(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained) {
     size_t sz = edata_size_get(edata);
     if (config_stats) {
-        atomic_fetch_add_zu(&shard->stats->abandoned_vm, sz,
+        atomic_fetch_add_zu(&shard->pac.stats->abandoned_vm, sz,
             ATOMIC_RELAXED);
     }
     /*
@@ -938,21 +938,20 @@ extent_maximally_purge(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     extent_dalloc_wrapper(tsdn, shard, ehooks, edata);
     if (config_stats) {
         /* Update stats accordingly. */
-        LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
+        LOCKEDINT_MTX_LOCK(tsdn, *shard->pac.stats_mtx);
         locked_inc_u64(tsdn,
-            LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.nmadvise, 1);
+            LOCKEDINT_MTX(*shard->pac.stats_mtx),
+            &shard->pac.stats->decay_dirty.nmadvise, 1);
         locked_inc_u64(tsdn,
-            LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.purged,
+            LOCKEDINT_MTX(*shard->pac.stats_mtx),
+            &shard->pac.stats->decay_dirty.purged,
             extent_size >> LG_PAGE);
-        LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-        atomic_fetch_sub_zu(&shard->stats->pa_mapped, extent_size,
+        LOCKEDINT_MTX_UNLOCK(tsdn, *shard->pac.stats_mtx);
+        atomic_fetch_sub_zu(&shard->pac.stats->pac_mapped, extent_size,
             ATOMIC_RELAXED);
     }
 }
-

 /*
  * Does the metadata management portions of putting an unused extent into the
  * given ecache_t (coalesces and inserts into the eset).

src/pa.c (19 lines changed)
@@ -30,7 +30,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
         return true;
     }
     if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache,
-        cur_time, dirty_decay_ms, muzzy_decay_ms)) {
+        cur_time, dirty_decay_ms, muzzy_decay_ms, &stats->pac_stats,
+        stats_mtx)) {
         return true;
     }

@@ -106,7 +107,7 @@ ecache_pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
         edata = ecache_alloc_grow(tsdn, shard, ehooks,
             &shard->pac.ecache_retained, NULL, size, alignment, zero);
         if (config_stats && edata != NULL) {
-            atomic_fetch_add_zu(&shard->stats->pa_mapped, size,
+            atomic_fetch_add_zu(&shard->pac.stats->pac_mapped, size,
                 ATOMIC_RELAXED);
         }
     }
@@ -170,7 +171,7 @@ ecache_pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         return true;
     }
     if (config_stats && mapped_add > 0) {
-        atomic_fetch_add_zu(&shard->stats->pa_mapped, mapped_add,
+        atomic_fetch_add_zu(&shard->pac.stats->pac_mapped, mapped_add,
             ATOMIC_RELAXED);
     }
     return false;
@@ -288,7 +289,7 @@ pa_stash_decayed(tsdn_t *tsdn, pa_shard_t *shard, ecache_t *ecache,

 static size_t
 pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
     edata_list_inactive_t *decay_extents) {
     bool err;

@@ -343,7 +344,7 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
         locked_inc_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
             &decay_stats->purged, npurged);
         LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-        atomic_fetch_sub_zu(&shard->stats->pa_mapped,
+        atomic_fetch_sub_zu(&shard->pac.stats->pac_mapped,
             nunmapped << LG_PAGE, ATOMIC_RELAXED);
     }

@@ -359,7 +360,7 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
  */
 static void
 pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
     size_t npages_limit, size_t npages_decay_max) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 1);
@@ -386,7 +387,7 @@ pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,

 void
 pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
     malloc_mutex_assert_owner(tsdn, &decay->mtx);
     pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, fully_decay,
         /* npages_limit */ 0, ecache_npages_get(ecache));
@@ -394,7 +395,7 @@ pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,

 static void
 pa_decay_try_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     size_t current_npages, size_t npages_limit) {
     if (current_npages > npages_limit) {
         pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache,
@@ -405,7 +406,7 @@ pa_decay_try_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,

 bool
 pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     pa_decay_purge_setting_t decay_purge_setting) {
     malloc_mutex_assert_owner(tsdn, &decay->mtx);

src/pa_extra.c (file name inferred; not captured in the source view)

@@ -63,13 +63,13 @@ pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,

 void
 pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
-    pa_shard_stats_t *shard_stats_out, pa_extent_stats_t *extent_stats_out,
+    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
     size_t *resident) {
     cassert(config_stats);

-    shard_stats_out->retained +=
+    pa_shard_stats_out->pac_stats.retained +=
         ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
-    shard_stats_out->edata_avail += atomic_load_zu(
+    pa_shard_stats_out->edata_avail += atomic_load_zu(
         &shard->edata_cache.count, ATOMIC_RELAXED);

     size_t resident_pgs = 0;
@@ -79,34 +79,34 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,

     /* Dirty decay stats */
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.npurge,
+        &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_dirty.npurge));
+        &shard->pac.stats->decay_dirty.npurge));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.nmadvise,
+        &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_dirty.nmadvise));
+        &shard->pac.stats->decay_dirty.nmadvise));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.purged,
+        &pa_shard_stats_out->pac_stats.decay_dirty.purged,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_dirty.purged));
+        &shard->pac.stats->decay_dirty.purged));

     /* Muzzy decay stats */
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.npurge,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_muzzy.npurge));
+        &shard->pac.stats->decay_muzzy.npurge));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.nmadvise,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_muzzy.nmadvise));
+        &shard->pac.stats->decay_muzzy.nmadvise));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.purged,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-        &shard->stats->decay_muzzy.purged));
+        &shard->pac.stats->decay_muzzy.purged));

-    atomic_load_add_store_zu(&shard_stats_out->abandoned_vm,
-        atomic_load_zu(&shard->stats->abandoned_vm, ATOMIC_RELAXED));
+    atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
+        atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));

     for (pszind_t i = 0; i < SC_NPSIZES; i++) {
         size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
@@ -119,12 +119,12 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
         retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
             i);

-        extent_stats_out[i].ndirty = dirty;
-        extent_stats_out[i].nmuzzy = muzzy;
-        extent_stats_out[i].nretained = retained;
-        extent_stats_out[i].dirty_bytes = dirty_bytes;
-        extent_stats_out[i].muzzy_bytes = muzzy_bytes;
-        extent_stats_out[i].retained_bytes = retained_bytes;
+        estats_out[i].ndirty = dirty;
+        estats_out[i].nmuzzy = muzzy;
+        estats_out[i].nretained = retained;
+        estats_out[i].dirty_bytes = dirty_bytes;
+        estats_out[i].muzzy_bytes = muzzy_bytes;
+        estats_out[i].retained_bytes = retained_bytes;
     }
 }

src/pac.c

@@ -6,7 +6,7 @@
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
     edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
-    ssize_t muzzy_decay_ms) {
+    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
     /*
      * Delay coalescing for dirty extents despite the disruptive effect on
      * memory layout for best-fit extent allocation, since cached extents
@@ -47,6 +47,8 @@ pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,

     pac->emap = emap;
     pac->edata_cache = edata_cache;
+    pac->stats = pac_stats;
+    pac->stats_mtx = stats_mtx;
     return false;
 }

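Taken together, the stats plumbing after this change can be condensed as follows. These lines are lifted from the hunks above with error handling and unrelated arguments omitted, so treat it as an orientation sketch rather than the exact code:

/* Wiring, from pa_shard_init() in src/pa.c (return value checked in the
 * real code): */
pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache, cur_time,
    dirty_decay_ms, muzzy_decay_ms, &stats->pac_stats, stats_mtx);

/* Unlocked counters use relaxed atomics on the pac-owned stats: */
atomic_fetch_add_zu(&shard->pac.stats->pac_mapped, size, ATOMIC_RELAXED);

/* Locked counters go through the mutex now reachable via the pac: */
LOCKEDINT_MTX_LOCK(tsdn, *shard->pac.stats_mtx);
locked_inc_u64(tsdn, LOCKEDINT_MTX(*shard->pac.stats_mtx),
    &shard->pac.stats->decay_dirty.nmadvise, 1);
LOCKEDINT_MTX_UNLOCK(tsdn, *shard->pac.stats_mtx);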