PA->PAC: Move in stats.

David Goldblatt 2020-06-01 17:42:27 -07:00 committed by David Goldblatt
parent db211eefbf
commit 7391382349
11 changed files with 159 additions and 144 deletions
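The move is mechanical: the decay, extent, mapped, retained, and abandoned-VM counters leave pa_shard_stats_t (pa.h) and become pac_decay_stats_t, pac_estats_t, and pac_stats_t (pac.h), owned by the PAC and reached through the shard's embedded pac_t. A minimal sketch of the access-path change, using only names that appear in the diff below (shard stands for any pa_shard_t *):

    /* Before: decay and mapped counters hung directly off the shard's stats. */
    locked_u64_t *npurge_old = &shard->stats->decay_dirty.npurge;
    size_t mapped_old = pa_shard_pa_mapped(shard);

    /* After: they live in pac_stats_t, reached through the embedded pac_t. */
    locked_u64_t *npurge_new = &shard->pac.stats->decay_dirty.npurge;
    size_t mapped_new = pac_mapped(&shard->pac); /* pa_mapped is now pac_mapped. */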


@@ -27,7 +27,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
-    pa_extent_stats_t *estats);
+    pac_estats_t *estats);
 void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
 #ifdef JEMALLOC_JET
 size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);


@@ -44,7 +44,7 @@ typedef struct ctl_arena_stats_s {
     bin_stats_data_t bstats[SC_NBINS];
     arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
-    pa_extent_stats_t estats[SC_NPSIZES];
+    pac_estats_t estats[SC_NPSIZES];
 } ctl_arena_stats_t;
 
 typedef struct ctl_stats_s {


@@ -25,33 +25,6 @@ enum pa_decay_purge_setting_e {
 };
 typedef enum pa_decay_purge_setting_e pa_decay_purge_setting_t;
 
-typedef struct pa_shard_decay_stats_s pa_shard_decay_stats_t;
-struct pa_shard_decay_stats_s {
-    /* Total number of purge sweeps. */
-    locked_u64_t npurge;
-    /* Total number of madvise calls made. */
-    locked_u64_t nmadvise;
-    /* Total number of pages purged. */
-    locked_u64_t purged;
-};
-
-typedef struct pa_extent_stats_s pa_extent_stats_t;
-struct pa_extent_stats_s {
-    /*
-     * Stats for a given index in the range [0, SC_NPSIZES] in the various
-     * ecache_ts.
-     * We track both bytes and # of extents: two extents in the same bucket
-     * may have different sizes if adjacent size classes differ by more than
-     * a page, so bytes cannot always be derived from # of extents.
-     */
-    size_t ndirty;
-    size_t dirty_bytes;
-    size_t nmuzzy;
-    size_t muzzy_bytes;
-    size_t nretained;
-    size_t retained_bytes;
-};
-
 /*
  * The stats for a particular pa_shard. Because of the way the ctl module
  * handles stats epoch data collection (it has its own arena_stats, and merges
@@ -65,30 +38,15 @@ struct pa_extent_stats_s {
  */
 typedef struct pa_shard_stats_s pa_shard_stats_t;
 struct pa_shard_stats_s {
-    pa_shard_decay_stats_t decay_dirty;
-    pa_shard_decay_stats_t decay_muzzy;
-
-    /*
-     * Number of unused virtual memory bytes currently retained. Retained
-     * bytes are technically mapped (though always decommitted or purged),
-     * but they are excluded from the mapped statistic (above).
-     */
-    size_t retained; /* Derived. */
-
-    /*
-     * Number of bytes currently mapped, excluding retained memory (and any
-     * base-allocated memory, which is tracked by the arena stats).
-     *
-     * We name this "pa_mapped" to avoid confusion with the arena_stats
-     * "mapped".
-     */
-    atomic_zu_t pa_mapped;
-
     /* Number of edata_t structs allocated by base, but not being used. */
     size_t edata_avail; /* Derived. */
-
-    /* VM space had to be leaked (undocumented). Normally 0. */
-    atomic_zu_t abandoned_vm;
+    /*
+     * Stats specific to the PAC. For now, these are the only stats that
+     * exist, but there will eventually be other page allocators. Things
+     * like edata_avail make sense in a cross-PA sense, but things like
+     * npurges don't.
+     */
+    pac_stats_t pac_stats;
 };
 
 /*
@@ -208,14 +166,14 @@ void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
  * concurrently with the call.
  */
 void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
 
 /*
  * Updates decay settings for the current time, and conditionally purges in
  * response (depending on decay_purge_setting). Returns whether or not the
  * epoch advanced.
  */
 bool pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     pa_decay_purge_setting_t decay_purge_setting);
 /*
@@ -251,13 +209,8 @@ void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
 void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
     size_t *ndirty, size_t *nmuzzy);
 
-static inline size_t
-pa_shard_pa_mapped(pa_shard_t *shard) {
-    return atomic_load_zu(&shard->stats->pa_mapped, ATOMIC_RELAXED);
-}
-
 void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
-    pa_shard_stats_t *shard_stats_out, pa_extent_stats_t *extent_stats_out,
+    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
     size_t *resident);
 
 /*


@@ -9,6 +9,58 @@
  * - Can use efficient OS-level zeroing primitives for demand-filled pages.
  */
 
+typedef struct pac_decay_stats_s pac_decay_stats_t;
+struct pac_decay_stats_s {
+    /* Total number of purge sweeps. */
+    locked_u64_t npurge;
+    /* Total number of madvise calls made. */
+    locked_u64_t nmadvise;
+    /* Total number of pages purged. */
+    locked_u64_t purged;
+};
+
+typedef struct pac_estats_s pac_estats_t;
+struct pac_estats_s {
+    /*
+     * Stats for a given index in the range [0, SC_NPSIZES] in the various
+     * ecache_ts.
+     * We track both bytes and # of extents: two extents in the same bucket
+     * may have different sizes if adjacent size classes differ by more than
+     * a page, so bytes cannot always be derived from # of extents.
+     */
+    size_t ndirty;
+    size_t dirty_bytes;
+    size_t nmuzzy;
+    size_t muzzy_bytes;
+    size_t nretained;
+    size_t retained_bytes;
+};
+
+typedef struct pac_stats_s pac_stats_t;
+struct pac_stats_s {
+    pac_decay_stats_t decay_dirty;
+    pac_decay_stats_t decay_muzzy;
+
+    /*
+     * Number of unused virtual memory bytes currently retained. Retained
+     * bytes are technically mapped (though always decommitted or purged),
+     * but they are excluded from the mapped statistic (above).
+     */
+    size_t retained; /* Derived. */
+
+    /*
+     * Number of bytes currently mapped, excluding retained memory (and any
+     * base-allocated memory, which is tracked by the arena stats).
+     *
+     * We name this "pac_mapped" to avoid confusion with the arena_stats
+     * "mapped".
+     */
+    atomic_zu_t pac_mapped;
+
+    /* VM space had to be leaked (undocumented). Normally 0. */
+    atomic_zu_t abandoned_vm;
+};
+
 typedef struct pac_s pac_t;
 struct pac_s {
     /*
@@ -35,13 +87,18 @@ struct pac_s {
      */
     decay_t decay_dirty; /* dirty --> muzzy */
     decay_t decay_muzzy; /* muzzy --> retained */
+
+    malloc_mutex_t *stats_mtx;
+    pac_stats_t *stats;
 };
 
 bool pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
     edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
-    ssize_t muzzy_decay_ms);
+    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx);
 bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
     size_t *new_limit);
+void pac_stats_merge(tsdn_t *tsdn, pac_t *pac, pac_stats_t *pac_stats_out,
+    pac_estats_t *estats_out, size_t *resident);
 
 static inline ssize_t
 pac_dirty_decay_ms_get(pac_t *pac) {
@@ -53,4 +110,9 @@ pac_muzzy_decay_ms_get(pac_t *pac) {
     return decay_ms_read(&pac->decay_muzzy);
 }
 
+static inline size_t
+pac_mapped(pac_t *pac) {
+    return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
+}
+
 #endif /* JEMALLOC_INTERNAL_PAC_H */


@@ -80,7 +80,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
-    pa_extent_stats_t *estats) {
+    pac_estats_t *estats) {
     cassert(config_stats);
 
     arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -89,8 +89,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     size_t base_allocated, base_resident, base_mapped, metadata_thp;
     base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
         &base_mapped, &metadata_thp);
-    size_t pa_mapped = pa_shard_pa_mapped(&arena->pa_shard);
-    astats->mapped += base_mapped + pa_mapped;
+    size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
+    astats->mapped += base_mapped + pac_mapped_sz;
     astats->resident += base_resident;
 
     LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
@@ -423,7 +423,7 @@ arena_decide_unforced_decay_purge_setting(bool is_background_thread) {
 static bool
 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, ssize_t decay_ms) {
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, ssize_t decay_ms) {
     if (!decay_ms_valid(decay_ms)) {
         return true;
     }
@@ -454,7 +454,7 @@ bool
 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
     return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
-        &arena->pa_shard.stats->decay_dirty,
+        &arena->pa_shard.pac.stats->decay_dirty,
         &arena->pa_shard.pac.ecache_dirty, decay_ms);
 }
@@ -462,13 +462,13 @@ bool
 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
     return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
-        &arena->pa_shard.stats->decay_muzzy,
+        &arena->pa_shard.pac.stats->decay_muzzy,
         &arena->pa_shard.pac.ecache_muzzy, decay_ms);
 }
 
 static bool
 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     bool is_background_thread, bool all) {
     if (all) {
         malloc_mutex_lock(tsdn, &decay->mtx);
@@ -521,7 +521,7 @@ static bool
 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
     return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
-        &arena->pa_shard.stats->decay_dirty,
+        &arena->pa_shard.pac.stats->decay_dirty,
         &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
 }
@@ -532,7 +532,7 @@ arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
         return false;
     }
     return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
-        &arena->pa_shard.stats->decay_muzzy,
+        &arena->pa_shard.pac.stats->decay_muzzy,
         &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
 }


@@ -831,7 +831,7 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
         memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
             sizeof(arena_stats_large_t));
         memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
-            sizeof(pa_extent_stats_t));
+            sizeof(pac_estats_t));
     }
 }
@@ -889,32 +889,31 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
     if (!destroyed) {
         sdstats->astats.mapped += astats->astats.mapped;
-        sdstats->astats.pa_shard_stats.retained
-            += astats->astats.pa_shard_stats.retained;
+        sdstats->astats.pa_shard_stats.pac_stats.retained
+            += astats->astats.pa_shard_stats.pac_stats.retained;
         sdstats->astats.pa_shard_stats.edata_avail
             += astats->astats.pa_shard_stats.edata_avail;
     }
 
+    ctl_accum_locked_u64(
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
+    ctl_accum_locked_u64(
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
+    ctl_accum_locked_u64(
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
+        &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.npurge,
-        &astats->astats.pa_shard_stats.decay_dirty.npurge);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.nmadvise,
-        &astats->astats.pa_shard_stats.decay_dirty.nmadvise);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
     ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_dirty.purged,
-        &astats->astats.pa_shard_stats.decay_dirty.purged);
-    ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.npurge,
-        &astats->astats.pa_shard_stats.decay_muzzy.npurge);
-    ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.nmadvise,
-        &astats->astats.pa_shard_stats.decay_muzzy.nmadvise);
-    ctl_accum_locked_u64(
-        &sdstats->astats.pa_shard_stats.decay_muzzy.purged,
-        &astats->astats.pa_shard_stats.decay_muzzy.purged);
+        &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
+        &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
 
 #define OP(mtx) malloc_mutex_prof_merge(			\
     &(sdstats->astats.mutex_prof_data[				\
@@ -957,8 +956,8 @@ MUTEX_PROF_ARENA_MUTEXES
             += astats->astats.nrequests_large;
         sdstats->astats.nflushes_large += astats->astats.nflushes_large;
         ctl_accum_atomic_zu(
-            &sdstats->astats.pa_shard_stats.abandoned_vm,
-            &astats->astats.pa_shard_stats.abandoned_vm);
+            &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
+            &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
 
         sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
@@ -1117,8 +1116,8 @@ ctl_refresh(tsdn_t *tsdn) {
         ctl_stats->metadata_thp =
             ctl_sarena->astats->astats.metadata_thp;
         ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
-        ctl_stats->retained =
-            ctl_sarena->astats->astats.pa_shard_stats.retained;
+        ctl_stats->retained = ctl_sarena->astats->astats
+            .pa_shard_stats.pac_stats.retained;
 
         ctl_background_thread_stats_read(tsdn);
@@ -2976,35 +2975,34 @@ CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
     arenas_i(mib[2])->astats->astats.mapped, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
-    arenas_i(mib[2])->astats->astats.pa_shard_stats.retained,
-    size_t)
+    arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
     arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.npurge),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.nmadvise),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_dirty.purged),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
     uint64_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.npurge),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.nmadvise),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
     uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
     locked_read_u64_unsynchronized(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.decay_muzzy.purged),
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
     uint64_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_base,
@@ -3022,7 +3020,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
     size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
     atomic_load_zu(
-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.abandoned_vm,
+    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
     ATOMIC_RELAXED), size_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,


@@ -196,7 +196,7 @@ extents_abandon_vm(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained) {
     size_t sz = edata_size_get(edata);
     if (config_stats) {
-        atomic_fetch_add_zu(&shard->stats->abandoned_vm, sz,
+        atomic_fetch_add_zu(&shard->pac.stats->abandoned_vm, sz,
             ATOMIC_RELAXED);
     }
     /*
@@ -938,21 +938,20 @@ extent_maximally_purge(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     extent_dalloc_wrapper(tsdn, shard, ehooks, edata);
     if (config_stats) {
         /* Update stats accordingly. */
-        LOCKEDINT_MTX_LOCK(tsdn, *shard->stats_mtx);
+        LOCKEDINT_MTX_LOCK(tsdn, *shard->pac.stats_mtx);
         locked_inc_u64(tsdn,
-            LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.nmadvise, 1);
+            LOCKEDINT_MTX(*shard->pac.stats_mtx),
+            &shard->pac.stats->decay_dirty.nmadvise, 1);
         locked_inc_u64(tsdn,
-            LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.purged,
+            LOCKEDINT_MTX(*shard->pac.stats_mtx),
+            &shard->pac.stats->decay_dirty.purged,
             extent_size >> LG_PAGE);
-        LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-        atomic_fetch_sub_zu(&shard->stats->pa_mapped, extent_size,
+        LOCKEDINT_MTX_UNLOCK(tsdn, *shard->pac.stats_mtx);
+        atomic_fetch_sub_zu(&shard->pac.stats->pac_mapped, extent_size,
             ATOMIC_RELAXED);
     }
 }
 
 /*
  * Does the metadata management portions of putting an unused extent into the
  * given ecache_t (coalesces and inserts into the eset).


@@ -30,7 +30,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
         return true;
     }
     if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache,
-        cur_time, dirty_decay_ms, muzzy_decay_ms)) {
+        cur_time, dirty_decay_ms, muzzy_decay_ms, &stats->pac_stats,
+        stats_mtx)) {
         return true;
     }
@@ -106,7 +107,7 @@ ecache_pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
         edata = ecache_alloc_grow(tsdn, shard, ehooks,
             &shard->pac.ecache_retained, NULL, size, alignment, zero);
         if (config_stats && edata != NULL) {
-            atomic_fetch_add_zu(&shard->stats->pa_mapped, size,
+            atomic_fetch_add_zu(&shard->pac.stats->pac_mapped, size,
                 ATOMIC_RELAXED);
         }
     }
@@ -170,7 +171,7 @@ ecache_pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         return true;
     }
     if (config_stats && mapped_add > 0) {
-        atomic_fetch_add_zu(&shard->stats->pa_mapped, mapped_add,
+        atomic_fetch_add_zu(&shard->pac.stats->pac_mapped, mapped_add,
             ATOMIC_RELAXED);
     }
     return false;
@@ -288,7 +289,7 @@ pa_stash_decayed(tsdn_t *tsdn, pa_shard_t *shard, ecache_t *ecache,
 static size_t
 pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
     edata_list_inactive_t *decay_extents) {
     bool err;
@@ -343,7 +344,7 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
         locked_inc_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
             &decay_stats->purged, npurged);
         LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
-        atomic_fetch_sub_zu(&shard->stats->pa_mapped,
+        atomic_fetch_sub_zu(&shard->pac.stats->pac_mapped,
             nunmapped << LG_PAGE, ATOMIC_RELAXED);
     }
@@ -359,7 +360,7 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
  */
 static void
 pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
     size_t npages_limit, size_t npages_decay_max) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 1);
@@ -386,7 +387,7 @@ pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 void
 pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
+    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
     malloc_mutex_assert_owner(tsdn, &decay->mtx);
     pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, fully_decay,
         /* npages_limit */ 0, ecache_npages_get(ecache));
@@ -394,7 +395,7 @@ pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 static void
 pa_decay_try_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     size_t current_npages, size_t npages_limit) {
     if (current_npages > npages_limit) {
         pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache,
@@ -405,7 +406,7 @@ pa_decay_try_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 bool
 pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
     pa_decay_purge_setting_t decay_purge_setting) {
     malloc_mutex_assert_owner(tsdn, &decay->mtx);


@@ -63,13 +63,13 @@ pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
 
 void
 pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
-    pa_shard_stats_t *shard_stats_out, pa_extent_stats_t *extent_stats_out,
+    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
     size_t *resident) {
     cassert(config_stats);
 
-    shard_stats_out->retained +=
+    pa_shard_stats_out->pac_stats.retained +=
         ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
-    shard_stats_out->edata_avail += atomic_load_zu(
+    pa_shard_stats_out->edata_avail += atomic_load_zu(
         &shard->edata_cache.count, ATOMIC_RELAXED);
 
     size_t resident_pgs = 0;
@@ -79,34 +79,34 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
     /* Dirty decay stats */
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.npurge,
+        &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.npurge));
+            &shard->pac.stats->decay_dirty.npurge));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.nmadvise,
+        &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.nmadvise));
+            &shard->pac.stats->decay_dirty.nmadvise));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_dirty.purged,
+        &pa_shard_stats_out->pac_stats.decay_dirty.purged,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_dirty.purged));
+            &shard->pac.stats->decay_dirty.purged));
 
     /* Muzzy decay stats */
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.npurge,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_muzzy.npurge));
+            &shard->pac.stats->decay_muzzy.npurge));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.nmadvise,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_muzzy.nmadvise));
+            &shard->pac.stats->decay_muzzy.nmadvise));
     locked_inc_u64_unsynchronized(
-        &shard_stats_out->decay_muzzy.purged,
+        &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
         locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
-            &shard->stats->decay_muzzy.purged));
+            &shard->pac.stats->decay_muzzy.purged));
 
-    atomic_load_add_store_zu(&shard_stats_out->abandoned_vm,
-        atomic_load_zu(&shard->stats->abandoned_vm, ATOMIC_RELAXED));
+    atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
+        atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
 
     for (pszind_t i = 0; i < SC_NPSIZES; i++) {
         size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
@@ -119,12 +119,12 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
         retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
             i);
-        extent_stats_out[i].ndirty = dirty;
-        extent_stats_out[i].nmuzzy = muzzy;
-        extent_stats_out[i].nretained = retained;
-        extent_stats_out[i].dirty_bytes = dirty_bytes;
-        extent_stats_out[i].muzzy_bytes = muzzy_bytes;
-        extent_stats_out[i].retained_bytes = retained_bytes;
+        estats_out[i].ndirty = dirty;
+        estats_out[i].nmuzzy = muzzy;
+        estats_out[i].nretained = retained;
+        estats_out[i].dirty_bytes = dirty_bytes;
+        estats_out[i].muzzy_bytes = muzzy_bytes;
+        estats_out[i].retained_bytes = retained_bytes;
     }
 }


@@ -6,7 +6,7 @@
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
     edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
-    ssize_t muzzy_decay_ms) {
+    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
     /*
      * Delay coalescing for dirty extents despite the disruptive effect on
      * memory layout for best-fit extent allocation, since cached extents
@@ -47,6 +47,8 @@ pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
     pac->emap = emap;
     pac->edata_cache = edata_cache;
+    pac->stats = pac_stats;
+    pac->stats_mtx = stats_mtx;
     return false;
 }


@@ -90,7 +90,7 @@ do_alloc_free_purge(void *arg) {
             &test_data->shard.pac.decay_dirty.mtx);
         pa_decay_all(TSDN_NULL, &test_data->shard,
             &test_data->shard.pac.decay_dirty,
-            &test_data->stats.decay_dirty,
+            &test_data->shard.pac.stats->decay_dirty,
             &test_data->shard.pac.ecache_dirty, true);
         malloc_mutex_unlock(TSDN_NULL,
             &test_data->shard.pac.decay_dirty.mtx);
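
Externally, nothing moves: the mallctl statistics fed by this plumbing keep their names, and only the structs backing them change. A consumer-side illustration (a hypothetical standalone program, not part of this commit; it assumes a jemalloc build with stats enabled and uses only documented mallctl names):

    #include <inttypes.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /* Advance the stats epoch so the reads below see fresh values. */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        /* stats.retained: now sourced from pa_shard_stats.pac_stats.retained. */
        size_t retained;
        sz = sizeof(retained);
        if (mallctl("stats.retained", &retained, &sz, NULL, 0) == 0) {
            printf("retained: %zu bytes\n", retained);
        }

        /* stats.arenas.0.dirty_npurge: backed by pac_stats.decay_dirty.npurge. */
        uint64_t npurge;
        sz = sizeof(npurge);
        if (mallctl("stats.arenas.0.dirty_npurge", &npurge, &sz, NULL, 0) == 0) {
            printf("arena 0 dirty_npurge: %" PRIu64 "\n", npurge);
        }
        return 0;
    }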