PAC: Move in decay.

David Goldblatt 2020-06-01 16:35:17 -07:00 committed by David Goldblatt
parent c81e389996
commit db211eefbf
11 changed files with 58 additions and 58 deletions
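
For orientation, a minimal before/after sketch of the structural move this commit makes, assuming only the type and field names visible in the hunks below; every other member, comment, and synchronization detail is elided:

/* Before: the page allocator shard owned the decay state directly. */
struct pa_shard_s {
	pac_t pac;
	decay_t decay_dirty;	/* dirty --> muzzy */
	decay_t decay_muzzy;	/* muzzy --> retained */
	/* ... */
};

/* After: the decay state lives inside the embedded pac_t. */
struct pac_s {
	/* ... */
	decay_t decay_dirty;	/* dirty --> muzzy */
	decay_t decay_muzzy;	/* muzzy --> retained */
};

/*
 * Call sites change accordingly: the removed shard-level readers such as
 * pa_shard_dirty_decay_ms_get(&arena->pa_shard) become the pac-level
 * pac_dirty_decay_ms_get(&arena->pa_shard.pac).
 */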

View File

@@ -131,9 +131,6 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.decay_dirty.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.decay_muzzy.mtx);
arena_decay_ticks(tsdn, arena, 1);
}

View File

@@ -55,7 +55,7 @@ arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->pa_shard.decay_dirty, 0);
&arena->pa_shard.pac.decay_dirty, 0);
}
}

View File

@@ -126,22 +126,12 @@ struct pa_shard_s {
/* The source of edata_t objects. */
edata_cache_t edata_cache;
/* Extent serial number generator state. */
atomic_zu_t extent_sn_next;
malloc_mutex_t *stats_mtx;
pa_shard_stats_t *stats;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: via the internal mutex.
*/
decay_t decay_dirty; /* dirty --> muzzy */
decay_t decay_muzzy; /* muzzy --> retained */
/* The emap this shard is tied to. */
emap_t *emap;
@@ -149,25 +139,16 @@ struct pa_shard_s {
base_t *base;
};
static inline ssize_t
pa_shard_dirty_decay_ms_get(pa_shard_t *shard) {
return decay_ms_read(&shard->decay_dirty);
}
static inline ssize_t
pa_shard_muzzy_decay_ms_get(pa_shard_t *shard) {
return decay_ms_read(&shard->decay_muzzy);
}
static inline bool
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
pa_shard_muzzy_decay_ms_get(shard) <= 0;
pac_muzzy_decay_ms_get(&shard->pac) <= 0;
}
static inline bool
pa_shard_may_force_decay(pa_shard_t *shard) {
return !(pa_shard_dirty_decay_ms_get(shard) == -1
|| pa_shard_muzzy_decay_ms_get(shard) == -1);
return !(pac_dirty_decay_ms_get(&shard->pac) == -1
|| pac_muzzy_decay_ms_get(&shard->pac) == -1);
}
static inline ehooks_t *

View File

@@ -26,11 +26,31 @@ struct pac_s {
/* The grow info for the retained ecache. */
ecache_grow_t ecache_grow;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: via the internal mutex.
*/
decay_t decay_dirty; /* dirty --> muzzy */
decay_t decay_muzzy; /* muzzy --> retained */
};
bool pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
edata_cache_t *edata_cache);
edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
ssize_t muzzy_decay_ms);
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
size_t *new_limit);
static inline ssize_t
pac_dirty_decay_ms_get(pac_t *pac) {
return decay_ms_read(&pac->decay_dirty);
}
static inline ssize_t
pac_muzzy_decay_ms_get(pac_t *pac) {
return decay_ms_read(&pac->decay_muzzy);
}
#endif /* JEMALLOC_INTERNAL_PAC_H */
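
A hedged usage sketch of the widened pac_init() declared above, mirroring the pa_shard_init() change later in this commit; shard, tsdn, ind, and emap stand in for whatever the caller already holds, and the opt_*_decay_ms globals plus the nstime init/update idiom are assumptions rather than part of this diff:

	nstime_t cur_time;
	nstime_init(&cur_time, 0);
	nstime_update(&cur_time);

	/* Decay timing and settings now go straight into the pac... */
	if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache,
	    &cur_time, opt_dirty_decay_ms, opt_muzzy_decay_ms)) {
		return true;
	}

	/* ...and are read back through the pac-level accessors. */
	ssize_t dirty_ms = pac_dirty_decay_ms_get(&shard->pac);
	ssize_t muzzy_ms = pac_muzzy_decay_ms_get(&shard->pac);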

View File

@@ -397,12 +397,12 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
return pa_shard_dirty_decay_ms_get(&arena->pa_shard);
return pac_dirty_decay_ms_get(&arena->pa_shard.pac);
}
ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
return pa_shard_muzzy_decay_ms_get(&arena->pa_shard);
return pac_muzzy_decay_ms_get(&arena->pa_shard.pac);
}
/*
@@ -453,7 +453,7 @@ arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.decay_dirty,
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
&arena->pa_shard.stats->decay_dirty,
&arena->pa_shard.pac.ecache_dirty, decay_ms);
}
@@ -461,7 +461,7 @@ arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
ssize_t decay_ms) {
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.decay_muzzy,
return arena_decay_ms_set(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
&arena->pa_shard.stats->decay_muzzy,
&arena->pa_shard.pac.ecache_muzzy, decay_ms);
}
@@ -520,7 +520,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
return arena_decay_impl(tsdn, arena, &arena->pa_shard.decay_dirty,
return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
&arena->pa_shard.stats->decay_dirty,
&arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
}
@@ -531,7 +531,7 @@ arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
return false;
}
return arena_decay_impl(tsdn, arena, &arena->pa_shard.decay_muzzy,
return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
&arena->pa_shard.stats->decay_muzzy,
&arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
}

View File

@@ -201,12 +201,12 @@ static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn,
&arena->pa_shard.decay_dirty, &arena->pa_shard.pac.ecache_dirty);
&arena->pa_shard.pac.decay_dirty, &arena->pa_shard.pac.ecache_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn,
&arena->pa_shard.decay_muzzy, &arena->pa_shard.pac.ecache_muzzy);
&arena->pa_shard.pac.decay_muzzy, &arena->pa_shard.pac.ecache_muzzy);
return i1 < i2 ? i1 : i2;
}

View File

@@ -3130,8 +3130,8 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
MUTEX_PROF_RESET(arena->pa_shard.decay_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);

View File

@@ -29,14 +29,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
if (edata_cache_init(&shard->edata_cache, base)) {
return true;
}
if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache)) {
return true;
}
if (decay_init(&shard->decay_dirty, cur_time, dirty_decay_ms)) {
return true;
}
if (decay_init(&shard->decay_muzzy, cur_time, muzzy_decay_ms)) {
if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache,
cur_time, dirty_decay_ms, muzzy_decay_ms)) {
return true;
}
@@ -91,7 +85,7 @@ pa_shard_extent_sn_next(pa_shard_t *shard) {
static bool
pa_shard_may_have_muzzy(pa_shard_t *shard) {
return pa_shard_muzzy_decay_ms_get(shard) != 0;
return pac_muzzy_decay_ms_get(&shard->pac) != 0;
}
static edata_t *

View File

@@ -10,8 +10,8 @@
void
pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
malloc_mutex_prefork(tsdn, &shard->decay_dirty.mtx);
malloc_mutex_prefork(tsdn, &shard->decay_muzzy.mtx);
malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
}
void
@@ -38,8 +38,8 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
ecache_grow_postfork_parent(tsdn, &shard->pac.ecache_grow);
malloc_mutex_postfork_parent(tsdn, &shard->decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->decay_muzzy.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
}
void
@@ -49,8 +49,8 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
ecache_grow_postfork_child(tsdn, &shard->pac.ecache_grow);
malloc_mutex_postfork_child(tsdn, &shard->decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &shard->decay_muzzy.mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
}
void
@@ -148,7 +148,7 @@ pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->decay_dirty.mtx, arena_prof_mutex_decay_dirty);
&shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
&shard->decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
&shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
}

View File

@@ -5,7 +5,8 @@
bool
pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
edata_cache_t *edata_cache) {
edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
ssize_t muzzy_decay_ms) {
/*
* Delay coalescing for dirty extents despite the disruptive effect on
* memory layout for best-fit extent allocation, since cached extents
@@ -37,6 +38,12 @@ pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
if (ecache_grow_init(tsdn, &pac->ecache_grow)) {
return true;
}
if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
return true;
}
if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
return true;
}
pac->emap = emap;
pac->edata_cache = edata_cache;

View File

@@ -86,13 +86,14 @@ do_alloc_free_purge(void *arg) {
bool generated_dirty;
pa_dalloc(TSDN_NULL, &test_data->shard, edata,
&generated_dirty);
malloc_mutex_lock(TSDN_NULL, &test_data->shard.decay_dirty.mtx);
malloc_mutex_lock(TSDN_NULL,
&test_data->shard.pac.decay_dirty.mtx);
pa_decay_all(TSDN_NULL, &test_data->shard,
&test_data->shard.decay_dirty,
&test_data->shard.pac.decay_dirty,
&test_data->stats.decay_dirty,
&test_data->shard.pac.ecache_dirty, true);
malloc_mutex_unlock(TSDN_NULL,
&test_data->shard.decay_dirty.mtx);
&test_data->shard.pac.decay_dirty.mtx);
}
return NULL;
}