PA: Minor cleanups and comment fixes.

This commit is contained in:
David Goldblatt 2020-03-11 16:13:36 -07:00 committed by David Goldblatt
parent 46a9d7fc0b
commit c075fd0bcb
3 changed files with 38 additions and 15 deletions

View File

@ -56,6 +56,21 @@ struct pa_shard_stats_s {
atomic_zu_t abandoned_vm; atomic_zu_t abandoned_vm;
}; };
/*
* The local allocator handle. Keeps the state necessary to satisfy page-sized
* allocations.
*
* The contents are mostly internal to the PA module. The key exception is that
* arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
* decay_ts, for a couple of queries, passing them back to a PA function, or
* acquiring decay.mtx and looking at decay.purging. The reasoning is that,
* while PA decides what and how to purge, the arena code decides when and where
* (e.g. on what thread). It's allowed to use the presence of another purger to
* decide.
* (The background thread code also touches some other decay internals, but
 * that's not fundamental; it's just an artifact of a partial refactoring, and
* its accesses could be straightforwardly moved inside the decay module).
*/
typedef struct pa_shard_s pa_shard_t; typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s { struct pa_shard_s {
/* /*
@ -148,15 +163,23 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
bool *generated_dirty); bool *generated_dirty);
void pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, /*
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, * All purging functions require holding decay->mtx. This is one of the few
size_t npages_limit, size_t npages_decay_max); * places external modules are allowed to peek inside pa_shard_t internals.
void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, */
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
/*
* Decays the number of pages currently in the ecache. This might not leave the
* ecache empty if other threads are inserting dirty objects into it
* concurrently with the call.
*/
void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
/* Returns true if the epoch advanced. */ /*
* Updates decay settings for the current time, and conditionally purges in
* response (depending on decay_purge_setting). Returns whether or not the
* epoch advanced.
*/
bool pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, bool pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
pa_decay_purge_setting_t decay_purge_setting); pa_decay_purge_setting_t decay_purge_setting);

View File

@ -582,12 +582,14 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
bool is_background_thread, bool all) { bool is_background_thread, bool all) {
if (all) { if (all) {
malloc_mutex_lock(tsdn, &decay->mtx);
pa_decay_all(tsdn, &arena->pa_shard, decay, decay_stats, ecache, pa_decay_all(tsdn, &arena->pa_shard, decay, decay_stats, ecache,
/* fully_decay */ all); /* fully_decay */ all);
malloc_mutex_unlock(tsdn, &decay->mtx);
/* /*
* The previous pa_decay_to_limit call may not have actually * The previous pa_decay_all call may not have actually decayed
* decayed all pages, if new pages were added concurrently with * all pages, if new pages were added concurrently with the
* the purge. * purge.
* *
* I don't think we need an activity check for that case (some * I don't think we need an activity check for that case (some
* other thread must be deallocating, and they should do one), * other thread must be deallocating, and they should do one),

View File

@ -260,13 +260,12 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
* stashed), otherwise unbounded new pages could be added to extents during the * stashed), otherwise unbounded new pages could be added to extents during the
* current decay run, so that the purging thread never finishes. * current decay run, so that the purging thread never finishes.
*/ */
void static void
pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
size_t npages_limit, size_t npages_decay_max) { size_t npages_limit, size_t npages_decay_max) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 1); WITNESS_RANK_CORE, 1);
malloc_mutex_assert_owner(tsdn, &decay->mtx);
if (decay->purging || npages_decay_max == 0) { if (decay->purging || npages_decay_max == 0) {
return; return;
@ -291,10 +290,9 @@ pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
void void
pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) { pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
malloc_mutex_lock(tsdn, &decay->mtx); malloc_mutex_assert_owner(tsdn, &decay->mtx);
pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, fully_decay,
fully_decay, 0, ecache_npages_get(ecache)); /* npages_limit */ 0, ecache_npages_get(ecache));
malloc_mutex_unlock(tsdn, &decay->mtx);
} }
static void static void