From c075fd0bcb4a4de13204d26ff400bd315811e435 Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Wed, 11 Mar 2020 16:13:36 -0700 Subject: [PATCH] PA: Minor cleanups and comment fixes. --- include/jemalloc/internal/pa.h | 35 ++++++++++++++++++++++++++++------ src/arena.c | 8 +++++--- src/pa.c | 10 ++++------ 3 files changed, 38 insertions(+), 15 deletions(-) diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h index d99b9b73..9636ced9 100644 --- a/include/jemalloc/internal/pa.h +++ b/include/jemalloc/internal/pa.h @@ -56,6 +56,21 @@ struct pa_shard_stats_s { atomic_zu_t abandoned_vm; }; +/* + * The local allocator handle. Keeps the state necessary to satisfy page-sized + * allocations. + * + * The contents are mostly internal to the PA module. The key exception is that + * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches + * decay_ts, for a couple of queries, passing them back to a PA function, or + * acquiring decay.mtx and looking at decay.purging. The reasoning is that, + * while PA decides what and how to purge, the arena code decides when and where + * (e.g. on what thread). It's allowed to use the presence of another purger to + * decide. + * (The background thread code also touches some other decay internals, but + * that's not fundamental; it's just an artifact of a partial refactoring, and + * its accesses could be straightforwardly moved inside the decay module). 
+ */ typedef struct pa_shard_s pa_shard_t; struct pa_shard_s { /* @@ -148,15 +163,23 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, bool *generated_dirty); -void pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, - pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, - size_t npages_limit, size_t npages_decay_max); -void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, - pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); +/* + * All purging functions require holding decay->mtx. This is one of the few + * places external modules are allowed to peek inside pa_shard_t internals. + */ +/* + * Decays the number of pages currently in the ecache. This might not leave the + * ecache empty if other threads are inserting dirty objects into it + * concurrently with the call. + */ void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); -/* Returns true if the epoch advanced. */ +/* + * Updates decay settings for the current time, and conditionally purges in + * response (depending on decay_purge_setting). Returns whether or not the + * epoch advanced. 
+ */ bool pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, pa_decay_purge_setting_t decay_purge_setting); diff --git a/src/arena.c b/src/arena.c index d1e61365..25fad273 100644 --- a/src/arena.c +++ b/src/arena.c @@ -582,12 +582,14 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool is_background_thread, bool all) { if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); pa_decay_all(tsdn, &arena->pa_shard, decay, decay_stats, ecache, /* fully_decay */ all); + malloc_mutex_unlock(tsdn, &decay->mtx); /* - * The previous pa_decay_to_limit call may not have actually - * decayed all pages, if new pages were added concurrently with - * the purge. + * The previous pa_decay_all call may not have actually decayed + * all pages, if new pages were added concurrently with the + * purge. * * I don't think we need an activity check for that case (some * other thread must be deallocating, and they should do one), diff --git a/src/pa.c b/src/pa.c index 06c205c4..d9eeb694 100644 --- a/src/pa.c +++ b/src/pa.c @@ -260,13 +260,12 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, * stashed), otherwise unbounded new pages could be added to extents during the * current decay run, so that the purging thread never finishes. 
*/ -void +static void pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, size_t npages_limit, size_t npages_decay_max) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); - malloc_mutex_assert_owner(tsdn, &decay->mtx); if (decay->purging || npages_decay_max == 0) { return; @@ -291,10 +290,9 @@ pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, void pa_decay_all(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) { - malloc_mutex_lock(tsdn, &decay->mtx); - pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, - fully_decay, 0, ecache_npages_get(ecache)); - malloc_mutex_unlock(tsdn, &decay->mtx); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + pa_decay_to_limit(tsdn, shard, decay, decay_stats, ecache, fully_decay, + /* npages_limit */ 0, ecache_npages_get(ecache)); } static void