Extents -> Eset: Convert some stats getters.

parent 820f070c6b
commit a42861540e
@@ -52,8 +52,8 @@ struct arena_decay_s {
 	/*
 	 * Number of unpurged pages at beginning of current epoch. During epoch
 	 * advancement we use the delta between arena->decay_*.nunpurged and
-	 * extents_npages_get(&arena->extents_*) to determine how many dirty
-	 * pages, if any, were generated.
+	 * eset_npages_get(&arena->extents_*) to determine how many dirty pages,
+	 * if any, were generated.
 	 */
 	size_t nunpurged;
 	/*
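
The comment above describes how newly generated dirty pages are detected at an epoch boundary: the live page count is compared against the count cached in nunpurged when the epoch started. A minimal standalone sketch of that delta, clamped to zero when purging shrank the set (the helper name is illustrative, not from this commit):

#include <stddef.h>

/* Infer how many dirty pages were generated since the epoch began. */
static size_t
new_dirty_pages_since_epoch(size_t nunpurged_at_epoch_start,
    size_t current_npages) {
	/* current_npages would come from eset_npages_get(&arena->extents_*). */
	if (current_npages <= nunpurged_at_epoch_start) {
		return 0;
	}
	return current_npages - nunpurged_at_epoch_start;
}
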
@@ -64,4 +64,10 @@ bool eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
     bool delay_coalesce);
 extent_state_t eset_state_get(const eset_t *eset);
 
+size_t eset_npages_get(eset_t *eset);
+/* Get the number of extents in the given page size index. */
+size_t eset_nextents_get(eset_t *eset, pszind_t ind);
+/* Get the sum total bytes of the extents in the given page size index. */
+size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
+
 #endif /* JEMALLOC_INTERNAL_ESET_H */
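
The new declarations mirror the removed extents_* prototypes one-for-one. A minimal usage sketch, assuming jemalloc's internal headers are in scope and an initialized eset_t; the summarizing function itself is illustrative, not part of this commit:

/* Sum per-page-size-class stats through the new eset getters. */
static void
eset_summarize(eset_t *eset, size_t *nextents_out, size_t *nbytes_out) {
	size_t nextents = 0;
	size_t nbytes = 0;
	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		nextents += eset_nextents_get(eset, i);
		nbytes += eset_nbytes_get(eset, i);
	}
	*nextents_out = nextents;
	*nbytes_out = nbytes;
}
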
@@ -27,11 +27,6 @@ size_t extent_size_quantize_ceil(size_t size);
 ph_proto(, extent_avail_, extent_tree_t, extent_t)
 ph_proto(, extent_heap_, extent_heap_t, extent_t)
 
-size_t extents_npages_get(eset_t *eset);
-/* Get the number of extents in the given page size index. */
-size_t extents_nextents_get(eset_t *eset, pszind_t ind);
-/* Get the sum total bytes of the extents in the given page size index. */
-size_t extents_nbytes_get(eset_t *eset, pszind_t ind);
 extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, eset_t *eset, void *new_addr,
     size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
src/arena.c
@@ -75,8 +75,8 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
-	*ndirty += extents_npages_get(&arena->extents_dirty);
-	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
+	*ndirty += eset_npages_get(&arena->extents_dirty);
+	*nmuzzy += eset_npages_get(&arena->extents_muzzy);
 }
 
 void
@@ -99,7 +99,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->mapped, base_mapped
 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
 	arena_stats_accum_zu(&astats->retained,
-	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
+	    eset_npages_get(&arena->extents_retained) << LG_PAGE);
 
 	atomic_store_zu(&astats->extent_avail,
 	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
@@ -130,8 +130,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
 	arena_stats_accum_zu(&astats->resident, base_resident +
 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
-	    extents_npages_get(&arena->extents_dirty) +
-	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+	    eset_npages_get(&arena->extents_dirty) +
+	    eset_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
 	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
 	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
 
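The resident estimate above is a page count shifted into bytes: active, dirty, and muzzy pages are summed, then multiplied by the page size via << LG_PAGE. A self-contained arithmetic sketch, assuming 4 KiB pages and made-up counter values:

#include <stdio.h>

int main(void) {
	const unsigned lg_page = 12;                        /* assumed 4 KiB pages */
	size_t nactive = 1000, ndirty = 200, nmuzzy = 50;   /* hypothetical counts */
	size_t resident = (nactive + ndirty + nmuzzy) << lg_page;
	printf("resident ~= %zu bytes\n", resident);        /* 1250 * 4096 = 5120000 */
	return 0;
}
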
@@ -173,13 +173,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
 		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
 		    retained_bytes;
-		dirty = extents_nextents_get(&arena->extents_dirty, i);
-		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
-		retained = extents_nextents_get(&arena->extents_retained, i);
-		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
-		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
-		retained_bytes =
-		    extents_nbytes_get(&arena->extents_retained, i);
+		dirty = eset_nextents_get(&arena->extents_dirty, i);
+		muzzy = eset_nextents_get(&arena->extents_muzzy, i);
+		retained = eset_nextents_get(&arena->extents_retained, i);
+		dirty_bytes = eset_nbytes_get(&arena->extents_dirty, i);
+		muzzy_bytes = eset_nbytes_get(&arena->extents_muzzy, i);
+		retained_bytes = eset_nbytes_get(&arena->extents_retained, i);
 
 		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
 		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
@@ -645,7 +644,7 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
 static void
 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
     eset_t *eset, const nstime_t *time, bool is_background_thread) {
-	size_t current_npages = extents_npages_get(eset);
+	size_t current_npages = eset_npages_get(eset);
 	arena_decay_epoch_advance_helper(decay, time, current_npages);
 
 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
@@ -720,7 +719,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	if (decay_ms <= 0) {
 		if (decay_ms == 0) {
 			arena_decay_to_limit(tsdn, arena, decay, eset, false,
-			    0, extents_npages_get(eset),
+			    0, eset_npages_get(eset),
 			    is_background_thread);
 		}
 		return false;
@@ -760,7 +759,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 		    is_background_thread);
 	} else if (is_background_thread) {
 		arena_decay_try_purge(tsdn, arena, decay, eset,
-		    extents_npages_get(eset),
+		    eset_npages_get(eset),
 		    arena_decay_backlog_npages_limit(decay),
 		    is_background_thread);
 	}
@@ -907,7 +906,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
 
 /*
  * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
+ * invariant: (eset_npages_get(extents) >= npages_limit).  We need an upper
  * bound on number of pages in order to prevent unbounded growth (namely in
  * stashed), otherwise unbounded new pages could be added to extents during the
  * current decay run, so that the purging thread never finishes.
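
The renamed invariant reads the same as before: a decay pass may purge at most npages_decay_max pages and must never drop the set below npages_limit. A small sketch of how such a bound can be computed by clamping against the current count, assuming jemalloc's internal headers are in scope; the helper name is illustrative, not from this commit:

/* Pages purgeable without violating eset_npages_get(eset) >= npages_limit. */
static size_t
npages_purgeable(eset_t *eset, size_t npages_limit, size_t npages_decay_max) {
	size_t current = eset_npages_get(eset);
	if (current <= npages_limit) {
		return 0;
	}
	size_t excess = current - npages_limit;
	return excess < npages_decay_max ? excess : npages_decay_max;
}
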
@@ -950,7 +949,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	if (all) {
 		malloc_mutex_lock(tsdn, &decay->mtx);
 		arena_decay_to_limit(tsdn, arena, decay, eset, all, 0,
-		    extents_npages_get(eset), is_background_thread);
+		    eset_npages_get(eset), is_background_thread);
 		malloc_mutex_unlock(tsdn, &decay->mtx);
 
 		return false;
@@ -1177,8 +1176,8 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
 	 * extents, so only retained extents may remain.
 	 */
-	assert(extents_npages_get(&arena->extents_dirty) == 0);
-	assert(extents_npages_get(&arena->extents_muzzy) == 0);
+	assert(eset_npages_get(&arena->extents_dirty) == 0);
+	assert(eset_npages_get(&arena->extents_muzzy) == 0);
 
 	/* Deallocate retained memory. */
 	arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -130,7 +130,7 @@ arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
 
 	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
 	assert(decay_interval_ns > 0);
-	size_t npages = extents_npages_get(eset);
+	size_t npages = eset_npages_get(eset);
 	if (npages == 0) {
 		unsigned i;
 		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
@@ -718,8 +718,8 @@ background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
 	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
 		should_signal = true;
 	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
-	    (extents_npages_get(&arena->extents_dirty) > 0 ||
-	    extents_npages_get(&arena->extents_muzzy) > 0 ||
+	    (eset_npages_get(&arena->extents_dirty) > 0 ||
+	    eset_npages_get(&arena->extents_muzzy) > 0 ||
 	    info->npages_to_purge_new > 0)) {
 		should_signal = true;
 	} else {
src/eset.c
@@ -28,3 +28,18 @@ extent_state_t
 eset_state_get(const eset_t *eset) {
 	return eset->state;
 }
+
+size_t
+eset_npages_get(eset_t *eset) {
+	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+}
+
+size_t
+eset_nextents_get(eset_t *eset, pszind_t pind) {
+	return atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
+}
+
+size_t
+eset_nbytes_get(eset_t *eset, pszind_t pind) {
+	return atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
+}
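
The added definitions are direct moves of the extents_* bodies removed from src/extent.c below; each reads its counter with a relaxed atomic load, which is enough when the value is only a statistics snapshot and is not used for synchronization. A standalone C11 sketch of the same load/store pairing, not jemalloc code:

#include <stdatomic.h>
#include <stddef.h>

/* Relaxed counter reads in the style of the eset getters. */
typedef struct {
	atomic_size_t npages;
} page_counter_t;

static void
page_counter_add(page_counter_t *c, size_t n) {
	atomic_fetch_add_explicit(&c->npages, n, memory_order_relaxed);
}

static size_t
page_counter_get(page_counter_t *c) {
	/* Relaxed: readers only need an approximate snapshot. */
	return atomic_load_explicit(&c->npages, memory_order_relaxed);
}
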
src/extent.c
@@ -252,21 +252,6 @@ extent_hooks_assure_initialized(arena_t *arena,
 /* Generate pairing heap functions. */
 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
 
-size_t
-extents_npages_get(eset_t *eset) {
-	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
-}
-
-size_t
-extents_nextents_get(eset_t *eset, pszind_t pind) {
-	return atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);
-}
-
-size_t
-extents_nbytes_get(eset_t *eset, pszind_t pind) {
-	return atomic_load_zu(&eset->nbytes[pind], ATOMIC_RELAXED);
-}
-
 static void
 extents_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
 	size_t cur = atomic_load_zu(&eset->nextents[pind], ATOMIC_RELAXED);