PAC: Move in decay rate setting.

Author: David Goldblatt (2020-06-03 18:30:33 -07:00), committed by David Goldblatt
parent 6a2774719f
commit 471eb5913c
9 changed files with 126 additions and 149 deletions

View File

@@ -40,10 +40,9 @@ void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
     edata_t *edata, size_t oldsize);
 void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
     edata_t *edata, size_t oldsize);
-ssize_t arena_dirty_decay_ms_get(arena_t *arena);
-bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
-ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
-bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
+    ssize_t decay_ms);
+ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
 void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all);
 void arena_reset(tsd_t *tsd, arena_t *arena);
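
The four per-state entry points above collapse into a single getter/setter pair keyed by extent_state_t. A sketch of the resulting call-site migration (illustration only, assuming jemalloc's internal headers; "tsdn" and "arena" are hypothetical locals from a caller's context):

/* Before: one function per extent state. */
ssize_t dirty_ms_old = arena_dirty_decay_ms_get(arena);
arena_muzzy_decay_ms_set(tsdn, arena, 10 * 1000);

/* After: one function, parameterized on extent_state_t. */
ssize_t dirty_ms_new = arena_decay_ms_get(arena, extent_state_dirty);
arena_decay_ms_set(tsdn, arena, extent_state_muzzy, 10 * 1000);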

View File

@@ -90,7 +90,7 @@ struct pa_shard_s {
 static inline bool
 pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
 	return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
-	    pac_muzzy_decay_ms_get(&shard->pac) <= 0;
+	    pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
 }
static inline ehooks_t *
@@ -137,6 +137,10 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
     bool *generated_dirty);
+bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
+    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
+ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
 
 /******************************************************************************/
 /*
  * Various bits of "boring" functionality that are still part of this module,

View File

@@ -10,12 +10,12 @@
  */
 
 /* How "eager" decay/purging should be. */
-enum pac_decay_purge_setting_e {
-	PAC_DECAY_PURGE_ALWAYS,
-	PAC_DECAY_PURGE_NEVER,
-	PAC_DECAY_PURGE_ON_EPOCH_ADVANCE
+enum pac_purge_eagerness_e {
+	PAC_PURGE_ALWAYS,
+	PAC_PURGE_NEVER,
+	PAC_PURGE_ON_EPOCH_ADVANCE
 };
-typedef enum pac_decay_purge_setting_e pac_decay_purge_setting_t;
+typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
 
 typedef struct pac_decay_stats_s pac_decay_stats_t;
 struct pac_decay_stats_s {
@@ -112,16 +112,6 @@ bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
 void pac_stats_merge(tsdn_t *tsdn, pac_t *pac, pac_stats_t *pac_stats_out,
     pac_estats_t *estats_out, size_t *resident);
 
-static inline ssize_t
-pac_dirty_decay_ms_get(pac_t *pac) {
-	return decay_ms_read(&pac->decay_dirty);
-}
-
-static inline ssize_t
-pac_muzzy_decay_ms_get(pac_t *pac) {
-	return decay_ms_read(&pac->decay_muzzy);
-}
-
 static inline size_t
 pac_mapped(pac_t *pac) {
 	return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
@@ -146,7 +136,7 @@ void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
  */
 bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
     pac_decay_stats_t *decay_stats, ecache_t *ecache,
-    pac_decay_purge_setting_t decay_purge_setting);
+    pac_purge_eagerness_t eagerness);
 
 /*
  * Gets / sets the maximum amount that we'll grow an arena down the
@@ -160,4 +150,7 @@ bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
 bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
     size_t *new_limit);
+bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
+    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
+ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
 
 #endif /* JEMALLOC_INTERNAL_PAC_H */
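
The renamed enum changes only terminology; the decision it drives is unchanged. A minimal sketch of the purge predicate the three values imply, mirroring the condition in pac_maybe_decay_purge() later in this diff (should_purge is a hypothetical standalone helper, not part of the commit):

#include <stdbool.h>

/* Purge whenever over the limit (ALWAYS), leave purging entirely to the
 * background thread (NEVER), or purge only when the decay epoch advances
 * (ON_EPOCH_ADVANCE). */
static bool
should_purge(pac_purge_eagerness_t eagerness, bool epoch_advanced) {
	return eagerness == PAC_PURGE_ALWAYS
	    || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE);
}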

View File

@@ -70,8 +70,8 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
 	*nthreads += arena_nthreads_get(arena, false);
 	*dss = dss_prec_names[arena_dss_prec_get(arena)];
-	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
-	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+	*dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
+	*muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
 	pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
 }
@@ -189,7 +189,7 @@ void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
-	if (arena_dirty_decay_ms_get(arena) == 0) {
+	if (arena_decay_ms_get(arena, extent_state_dirty) == 0) {
 		arena_decay_dirty(tsdn, arena, false, true);
 	} else {
 		arena_background_thread_inactivity_check(tsdn, arena, false);
@@ -395,77 +395,37 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
 	}
 }
 
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
-	return pac_dirty_decay_ms_get(&arena->pa_shard.pac);
-}
-
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
-	return pac_muzzy_decay_ms_get(&arena->pa_shard.pac);
-}
-
 /*
  * In situations where we're not forcing a decay (i.e. because the user
  * specifically requested it), should we purge ourselves, or wait for the
  * background thread to get to it.
  */
-static pac_decay_purge_setting_t
-arena_decide_unforced_decay_purge_setting(bool is_background_thread) {
+static pac_purge_eagerness_t
+arena_decide_unforced_purge_eagerness(bool is_background_thread) {
 	if (is_background_thread) {
-		return PAC_DECAY_PURGE_ALWAYS;
+		return PAC_PURGE_ALWAYS;
 	} else if (!is_background_thread && background_thread_enabled()) {
-		return PAC_DECAY_PURGE_NEVER;
+		return PAC_PURGE_NEVER;
 	} else {
-		return PAC_DECAY_PURGE_ON_EPOCH_ADVANCE;
+		return PAC_PURGE_ON_EPOCH_ADVANCE;
 	}
 }
 
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
-    pac_decay_stats_t *decay_stats, ecache_t *ecache, ssize_t decay_ms) {
-	if (!decay_ms_valid(decay_ms)) {
-		return true;
-	}
-	malloc_mutex_lock(tsdn, &decay->mtx);
-	/*
-	 * Restart decay backlog from scratch, which may cause many dirty pages
-	 * to be immediately purged.  It would conceptually be possible to map
-	 * the old backlog onto the new backlog, but there is no justification
-	 * for such complexity since decay_ms changes are intended to be
-	 * infrequent, either between the {-1, 0, >0} states, or a one-time
-	 * arbitrary change during initial arena configuration.
-	 */
-	nstime_t cur_time;
-	nstime_init_update(&cur_time);
-	decay_reinit(decay, &cur_time, decay_ms);
-	pac_decay_purge_setting_t decay_purge =
-	    arena_decide_unforced_decay_purge_setting(
-	    /* is_background_thread */ false);
-	pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac, decay, decay_stats,
-	    ecache, decay_purge);
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-
-	return false;
-}
-
 bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
     ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
-	    &arena->pa_shard.pac.stats->decay_dirty,
-	    &arena->pa_shard.pac.ecache_dirty, decay_ms);
+	pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
+	    /* is_background_thread */ false);
+	return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
+	    eagerness);
 }
 
-bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
-    ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
-	    &arena->pa_shard.pac.stats->decay_muzzy,
-	    &arena->pa_shard.pac.ecache_muzzy, decay_ms);
+ssize_t
+arena_decay_ms_get(arena_t *arena, extent_state_t state) {
+	return pa_decay_ms_get(&arena->pa_shard, state);
 }
 
 static bool
 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
     pac_decay_stats_t *decay_stats, ecache_t *ecache,
@@ -497,10 +457,10 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
 		/* No need to wait if another thread is in progress. */
 		return true;
 	}
-	pac_decay_purge_setting_t decay_purge =
-	    arena_decide_unforced_decay_purge_setting(is_background_thread);
+	pac_purge_eagerness_t eagerness =
+	    arena_decide_unforced_purge_eagerness(is_background_thread);
 	bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
-	    decay, decay_stats, ecache, decay_purge);
+	    decay, decay_stats, ecache, eagerness);
 	size_t npages_new;
 	if (epoch_advanced) {
 		/* Backlog is updated on epoch advance. */
@@ -1546,10 +1506,12 @@ arena_choose_huge(tsd_t *tsd) {
 	 * expected for huge allocations.
 	 */
 	if (arena_dirty_decay_ms_default_get() > 0) {
-		arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+		arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+		    extent_state_dirty, 0);
 	}
 	if (arena_muzzy_decay_ms_default_get() > 0) {
-		arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+		arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+		    extent_state_muzzy, 0);
 	}
 }

View File

@@ -2430,10 +2430,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
 		ret = EFAULT;
 		goto label_return;
 	}
+	extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
 	if (oldp != NULL && oldlenp != NULL) {
-		size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
-		    arena_muzzy_decay_ms_get(arena);
+		size_t oldval = arena_decay_ms_get(arena, state);
 		READ(oldval, ssize_t);
 	}
 	if (newp != NULL) {
@@ -2452,9 +2452,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
 			goto label_return;
 		}
 	}
-	if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
-	    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
-	    arena, *(ssize_t *)newp)) {
+	if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
+	    *(ssize_t *)newp)) {
 		ret = EFAULT;
 		goto label_return;
 	}

View File

@@ -55,8 +55,8 @@ extent_sn_next(pac_t *pac) {
 
 static inline bool
 extent_may_force_decay(pac_t *pac) {
-	return !(pac_dirty_decay_ms_get(pac) == -1
-	    || pac_muzzy_decay_ms_get(pac) == -1);
+	return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
+	    || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
 }
 
 static bool

View File

@@ -78,9 +78,9 @@ pa_shard_destroy_retained(tsdn_t *tsdn, pa_shard_t *shard) {
 	}
 }
 
-static bool
+static inline bool
 pa_shard_may_have_muzzy(pa_shard_t *shard) {
-	return pac_muzzy_decay_ms_get(&shard->pac) != 0;
+	return pac_decay_ms_get(&shard->pac, extent_state_muzzy) != 0;
 }
 
 static edata_t *
@@ -389,60 +389,20 @@ pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 	    /* npages_limit */ 0, ecache_npages_get(ecache));
 }
 
-static void
-pa_decay_try_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
-    size_t current_npages, size_t npages_limit) {
-	if (current_npages > npages_limit) {
-		pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache,
-		    /* fully_decay */ false, npages_limit,
-		    current_npages - npages_limit);
-	}
-}
-
-bool
-pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
-    pac_decay_purge_setting_t decay_purge_setting) {
-	malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
-	/* Purge all or nothing if the option is disabled. */
-	ssize_t decay_ms = decay_ms_read(decay);
-	if (decay_ms <= 0) {
-		if (decay_ms == 0) {
-			pa_decay_to_limit(tsdn, shard, decay, decay_stats,
-			    ecache, /* fully_decay */ false,
-			    /* npages_limit */ 0, ecache_npages_get(ecache));
-		}
-		return false;
-	}
-
-	/*
-	 * If the deadline has been reached, advance to the current epoch and
-	 * purge to the new limit if necessary.  Note that dirty pages created
-	 * during the current epoch are not subject to purge until a future
-	 * epoch, so as a result purging only happens during epoch advances, or
-	 * being triggered by background threads (scheduled event).
-	 */
-	nstime_t time;
-	nstime_init_update(&time);
-	size_t npages_current = ecache_npages_get(ecache);
-	bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
-	    npages_current);
-	if (decay_purge_setting == PAC_DECAY_PURGE_ALWAYS
-	    || (epoch_advanced && decay_purge_setting
-	    == PAC_DECAY_PURGE_ON_EPOCH_ADVANCE)) {
-		size_t npages_limit = decay_npages_limit_get(decay);
-		pa_decay_try_purge(tsdn, shard, decay, decay_stats, ecache,
-		    npages_current, npages_limit);
-	}
-
-	return epoch_advanced;
-}
-
 bool
 pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
     size_t *old_limit, size_t *new_limit) {
 	return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
 	    new_limit);
 }
+
+bool
+pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
+    ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+	return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
+}
+
+ssize_t
+pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
+	return pac_decay_ms_get(&shard->pac, state);
+}
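
The new pa_decay_ms_set() and pa_decay_ms_get() are deliberately thin: the page allocator shard just forwards to its pac component, so the chain after this commit is arena -> pa -> pac. A sketch of the layering (hypothetical caller, assuming internal headers):

/* Each layer adds exactly one concern:
 * arena_decay_ms_set(): picks the purge eagerness for the caller's context;
 * pa_decay_ms_set():    routes to the shard's pac component;
 * pac_decay_ms_set():   validates decay_ms, reinits decay, maybe purges. */
bool failed = arena_decay_ms_set(tsdn, arena, extent_state_dirty,
    /* decay_ms */ 10 * 1000);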

View File

@@ -8,6 +8,27 @@ pac_ehooks_get(pac_t *pac) {
 	return base_ehooks_get(pac->base);
 }
 
+static inline void
+pac_decay_data_get(pac_t *pac, extent_state_t state,
+    decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
+	switch(state) {
+	case extent_state_dirty:
+		*r_decay = &pac->decay_dirty;
+		*r_decay_stats = &pac->stats->decay_dirty;
+		*r_ecache = &pac->ecache_dirty;
+		return;
+	case extent_state_muzzy:
+		*r_decay = &pac->decay_muzzy;
+		*r_decay_stats = &pac->stats->decay_muzzy;
+		*r_ecache = &pac->ecache_muzzy;
+		return;
+	default:
+		unreachable();
+	}
+}
+
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
     edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
@@ -117,7 +138,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
 	ehooks_t *ehooks = pac_ehooks_get(pac);
 
-	bool try_muzzy = !fully_decay && pac_muzzy_decay_ms_get(pac) != 0;
+	bool try_muzzy = !fully_decay
+	    && pac_decay_ms_get(pac, extent_state_muzzy) != 0;
 
 	for (edata_t *edata = edata_list_first(decay_extents); edata !=
 	    NULL; edata = edata_list_first(decay_extents)) {
@@ -225,7 +247,7 @@ pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
 bool
 pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
     pac_decay_stats_t *decay_stats, ecache_t *ecache,
-    pac_decay_purge_setting_t decay_purge_setting) {
+    pac_purge_eagerness_t eagerness) {
 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
 
 	/* Purge all or nothing if the option is disabled. */
@@ -251,9 +273,8 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
 	size_t npages_current = ecache_npages_get(ecache);
 	bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
 	    npages_current);
-	if (decay_purge_setting == PAC_DECAY_PURGE_ALWAYS
-	    || (epoch_advanced && decay_purge_setting
-	    == PAC_DECAY_PURGE_ON_EPOCH_ADVANCE)) {
+	if (eagerness == PAC_PURGE_ALWAYS
+	    || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
 		size_t npages_limit = decay_npages_limit_get(decay);
 		pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache,
 		    npages_current, npages_limit);
@@ -261,3 +282,42 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
 	return epoch_advanced;
 }
+
+bool
+pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
+    ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+	decay_t *decay;
+	pac_decay_stats_t *decay_stats;
+	ecache_t *ecache;
+	pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+
+	if (!decay_ms_valid(decay_ms)) {
+		return true;
+	}
+
+	malloc_mutex_lock(tsdn, &decay->mtx);
+	/*
+	 * Restart decay backlog from scratch, which may cause many dirty pages
+	 * to be immediately purged.  It would conceptually be possible to map
+	 * the old backlog onto the new backlog, but there is no justification
+	 * for such complexity since decay_ms changes are intended to be
+	 * infrequent, either between the {-1, 0, >0} states, or a one-time
+	 * arbitrary change during initial arena configuration.
+	 */
+	nstime_t cur_time;
+	nstime_init_update(&cur_time);
+	decay_reinit(decay, &cur_time, decay_ms);
+	pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness);
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+
+	return false;
+}
+
+ssize_t
+pac_decay_ms_get(pac_t *pac, extent_state_t state) {
+	decay_t *decay;
+	pac_decay_stats_t *decay_stats;
+	ecache_t *ecache;
+	pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+	return decay_ms_read(decay);
+}
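
pac_decay_data_get() (added near the top of this file) centralizes the dirty/muzzy dispatch that each removed helper used to hard-code. A hedged usage sketch, assuming jemalloc's internal headers, plus a note on conventions: decay_ms of -1 disables decay, 0 purges eagerly, and positive values give the decay time in milliseconds; like most jemalloc setters, pac_decay_ms_set() returns true on error:

/* Hypothetical caller; mirrors the pattern pac_decay_ms_get() uses above. */
decay_t *decay;
pac_decay_stats_t *decay_stats;
ecache_t *ecache;
pac_decay_data_get(pac, extent_state_muzzy, &decay, &decay_stats, &ecache);
ssize_t muzzy_ms = decay_ms_read(decay);

/* Returns true if decay_ms fails decay_ms_valid(); nothing changes then. */
if (pac_decay_ms_set(tsdn, pac, extent_state_muzzy, /* decay_ms */ -1,
    PAC_PURGE_ON_EPOCH_ADVANCE)) {
	/* invalid decay_ms */
}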

View File

@@ -107,7 +107,6 @@ TEST_BEGIN(test_alloc_free_purge_thds) {
 	for (int i = 0; i < 4; i++) {
 		thd_join(thds[i], NULL);
 	}
 }
 TEST_END