PA: Move in some more internals accesses.

David Goldblatt 2020-03-12 11:21:22 -07:00 committed by David Goldblatt
parent 238f3c7430
commit 07675840a5
3 changed files with 49 additions and 29 deletions


@@ -153,6 +153,12 @@ pa_shard_muzzy_decay_ms_get(pa_shard_t *shard) {
 	return decay_ms_read(&shard->decay_muzzy);
 }
 
+static inline bool
+pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
+	return ecache_npages_get(&shard->ecache_muzzy) == 0 &&
+	    pa_shard_muzzy_decay_ms_get(shard) <= 0;
+}
+
 static inline bool
 pa_shard_may_force_decay(pa_shard_t *shard) {
 	return !(pa_shard_dirty_decay_ms_get(shard) == -1
@@ -167,6 +173,18 @@ pa_shard_ehooks_get(pa_shard_t *shard) {
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
+/*
+ * This does the PA-specific parts of arena reset (i.e. freeing all active
+ * allocations).
+ */
+void pa_shard_reset(pa_shard_t *shard);
+/*
+ * Destroy all the remaining retained extents. Should only be called after
+ * decaying all active, dirty, and muzzy extents to the retained state, as the
+ * last step in destroying the shard.
+ */
+void pa_shard_destroy_retained(tsdn_t *tsdn, pa_shard_t *shard);
+
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
 /* Gets an edata for the given allocation. */
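The new header comments imply an ordering contract for whoever owns the shard. A minimal sketch of that sequence, assuming an already-initialized shard; the decay step and its helper name are hypothetical stand-ins for the arena's existing decay machinery and are not part of this change:

/* Illustrative teardown ordering for a pa_shard_t, per the comments above. */
static void
pa_shard_teardown_sketch(tsdn_t *tsdn, pa_shard_t *shard) {
	/* Reset: drop the PA-side view of active allocations. */
	pa_shard_reset(shard);

	/*
	 * Hypothetical step: decay all active, dirty, and muzzy extents down
	 * to the retained state (in arena code this happens through the decay
	 * machinery, not a single call like this).
	 */
	/* decay_all_to_retained(tsdn, shard); */

	/*
	 * Last step: destroy whatever remains in the retained ecache. This is
	 * only legal once the dirty and muzzy ecaches are empty.
	 */
	pa_shard_destroy_retained(tsdn, shard);
}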


@@ -544,8 +544,7 @@ arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
 static bool
 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
-	if (ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0 &&
-	    arena_muzzy_decay_ms_get(arena) <= 0) {
+	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
 		return false;
 	}
 	return arena_decay_impl(tsdn, arena, &arena->pa_shard.decay_muzzy,
@@ -703,27 +702,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 			    &arena->bins[i].bin_shards[j]);
 		}
 	}
 
-	atomic_store_zu(&arena->pa_shard.nactive, 0, ATOMIC_RELAXED);
-}
-
-static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
-	/*
-	 * Iterate over the retained extents and destroy them. This gives the
-	 * extent allocator underlying the extent hooks an opportunity to unmap
-	 * all retained memory without having to keep its own metadata
-	 * structures. In practice, virtual memory for dss-allocated extents is
-	 * leaked here, so best practice is to avoid dss for arenas to be
-	 * destroyed, or provide custom extent hooks that track retained
-	 * dss-based extents for later reuse.
-	 */
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-
-	edata_t *edata;
-	while ((edata = ecache_evict(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_retained, 0)) != NULL) {
-		extent_destroy_wrapper(tsdn, &arena->pa_shard, ehooks, edata);
-	}
+	pa_shard_reset(&arena->pa_shard);
 }
 
 void
@@ -735,13 +714,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	/*
 	 * No allocations have occurred since arena_reset() was called.
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
-	 * extents, so only retained extents may remain.
+	 * extents, so only retained extents may remain and it's safe to call
+	 * pa_shard_destroy_retained.
 	 */
-	assert(ecache_npages_get(&arena->pa_shard.ecache_dirty) == 0);
-	assert(ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0);
-
-	/* Deallocate retained memory. */
-	arena_destroy_retained(tsd_tsdn(tsd), arena);
+	pa_shard_destroy_retained(tsd_tsdn(tsd), &arena->pa_shard);
 
 	/*
 	 * Remove the arena pointer from the arenas array. We rely on the fact


@@ -65,6 +65,32 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 	return false;
 }
 
+void
+pa_shard_reset(pa_shard_t *shard) {
+	atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
+}
+
+void
+pa_shard_destroy_retained(tsdn_t *tsdn, pa_shard_t *shard) {
+	assert(ecache_npages_get(&shard->ecache_dirty) == 0);
+	assert(ecache_npages_get(&shard->ecache_muzzy) == 0);
+	/*
+	 * Iterate over the retained extents and destroy them. This gives the
+	 * extent allocator underlying the extent hooks an opportunity to unmap
+	 * all retained memory without having to keep its own metadata
+	 * structures. In practice, virtual memory for dss-allocated extents is
+	 * leaked here, so best practice is to avoid dss for arenas to be
+	 * destroyed, or provide custom extent hooks that track retained
+	 * dss-based extents for later reuse.
+	 */
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+
+	edata_t *edata;
+	while ((edata = ecache_evict(tsdn, shard, ehooks,
+	    &shard->ecache_retained, 0)) != NULL) {
+		extent_destroy_wrapper(tsdn, shard, ehooks, edata);
+	}
+}
+
 size_t
 pa_shard_extent_sn_next(pa_shard_t *shard) {
 	return atomic_fetch_add_zu(&shard->extent_sn_next, 1, ATOMIC_RELAXED);
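As a sanity check on the new split of responsibilities, here is a hedged sketch of what pa_shard_reset now guarantees and what it leaves alone: it only clears the shard's active-page counter and does not touch the ecaches. The wrapper function name is hypothetical, and the relaxed atomic load mirrors the store/fetch-add pattern already used in this file.

/* Illustrative only: pa_shard_reset touches nothing but the nactive counter. */
static void
pa_shard_reset_example(pa_shard_t *shard) {
	pa_shard_reset(shard);
	assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) == 0);
	/* The dirty, muzzy, and retained ecaches are left unmodified. */
}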