Move arena decay getters to PA.

This commit is contained in:
David Goldblatt 2020-03-10 11:04:02 -07:00 committed by David Goldblatt
parent 48a2cd6d79
commit e77f47a85a
4 changed files with 18 additions and 9 deletions

View File

@ -108,12 +108,6 @@ arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx) {
large_prof_info_set(edata, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
arena_may_force_decay(arena_t *arena) {
	/*
	 * Forced (eager) decay is only permitted when neither decay sequence
	 * has been disabled; a decay time of -1 means "never decay".
	 */
	return arena_dirty_decay_ms_get(arena) != -1
	    && arena_muzzy_decay_ms_get(arena) != -1;
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;

View File

@ -90,6 +90,21 @@ pa_shard_stats_mapped_add(tsdn_t *tsdn, pa_shard_t *shard, size_t size) {
LOCKEDINT_MTX_UNLOCK(tsdn, *shard->stats_mtx);
}
/* Current dirty-page decay time for the shard, in milliseconds. */
static inline ssize_t
pa_shard_dirty_decay_ms_get(pa_shard_t *shard) {
	decay_t *decay = &shard->decay_dirty;
	return decay_ms_read(decay);
}
/* Current muzzy-page decay time for the shard, in milliseconds. */
static inline ssize_t
pa_shard_muzzy_decay_ms_get(pa_shard_t *shard) {
	decay_t *decay = &shard->decay_muzzy;
	return decay_ms_read(decay);
}
/*
 * Whether eager (forced) purging is allowed on this shard.  A decay time of
 * -1 on either the dirty or muzzy sequence disables decay entirely, in which
 * case forced purging must not happen.
 */
static inline bool
pa_shard_may_force_decay(pa_shard_t *shard) {
	return pa_shard_dirty_decay_ms_get(shard) != -1
	    && pa_shard_muzzy_decay_ms_get(shard) != -1;
}
/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);

View File

@ -595,12 +595,12 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
/*
 * Thin forwarding getter: the decay state now lives in the arena's pa_shard,
 * so delegate to the PA-layer accessor.  (The pre-refactor direct
 * decay_ms_read() line was diff residue — unreachable dead code — and has
 * been removed.)
 */
ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return pa_shard_dirty_decay_ms_get(&arena->pa_shard);
}
/*
 * Thin forwarding getter: delegate to the PA-layer accessor for the muzzy
 * decay time.  (The pre-refactor direct decay_ms_read() line was diff
 * residue — unreachable dead code — and has been removed.)
 */
ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return pa_shard_muzzy_decay_ms_get(&arena->pa_shard);
}
static bool

View File

@ -1013,7 +1013,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
&coalesced, growing_retained);
} while (coalesced);
if (edata_size_get(edata) >= oversize_threshold &&
arena_may_force_decay(arena)) {
pa_shard_may_force_decay(&arena->pa_shard)) {
/* Shortcut to purge the oversize extent eagerly. */
malloc_mutex_unlock(tsdn, &ecache->mtx);
extent_maximally_purge(tsdn, arena, ehooks, edata);