Add a per-arena oversize_threshold.

This can let manual arenas trade off memory and CPU the way auto arenas do.
David Goldblatt
2020-11-11 13:34:43 -08:00
committed by David Goldblatt
parent 4ca3d91e96
commit cf2549a149
10 changed files with 194 additions and 12 deletions
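
The diff below threads a per-arena threshold through the page allocator and exposes it via mallctl. As a rough, hypothetical usage sketch (not part of this commit): create a manual arena with the existing "arenas.create" ctl, then tune its threshold through the new "arena.<i>.oversize_threshold" name added in the ctl table below (still undocumented, as the inline comment notes). Extents at or above the threshold become eligible for eager purging when they are deallocated (subject to extent_may_force_decay()), trading CPU for a smaller dirty-page footprint. The program below assumes an unprefixed jemalloc build linked with -ljemalloc.

/* Hypothetical usage sketch; names other than the two ctl strings are mine. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Create a manual arena; its index comes back through oldp. */
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return 1;
	}

	char name[64];
	snprintf(name, sizeof(name), "arena.%u.oversize_threshold", arena_ind);

	/* Read the threshold the arena inherited from the global default. */
	size_t threshold;
	size_t threshold_sz = sizeof(threshold);
	mallctl(name, &threshold, &threshold_sz, NULL, 0);
	printf("initial oversize_threshold: %zu\n", threshold);

	/* Lower it: extents of 2 MiB or more get purged eagerly on free. */
	size_t new_threshold = 2 << 20;
	mallctl(name, NULL, NULL, &new_threshold, sizeof(new_threshold));
	return 0;
}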

@@ -1500,7 +1500,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	nstime_init_update(&cur_time);
 	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_emap_global, base, ind,
 	    &arena->stats.pa_shard_stats, LOCKEDINT_MTX(arena->stats.mtx),
-	    &cur_time, arena_dirty_decay_ms_default_get(),
+	    &cur_time, oversize_threshold, arena_dirty_decay_ms_default_get(),
 	    arena_muzzy_decay_ms_default_get())) {
 		goto label_error;
 	}

@@ -151,6 +151,7 @@ CTL_PROTO(arena_i_purge)
 CTL_PROTO(arena_i_reset)
 CTL_PROTO(arena_i_destroy)
 CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_oversize_threshold)
 CTL_PROTO(arena_i_dirty_decay_ms)
 CTL_PROTO(arena_i_muzzy_decay_ms)
 CTL_PROTO(arena_i_extent_hooks)
@@ -431,6 +432,11 @@ static const ctl_named_node_t arena_i_node[] = {
 	{NAME("reset"), CTL(arena_i_reset)},
 	{NAME("destroy"), CTL(arena_i_destroy)},
 	{NAME("dss"), CTL(arena_i_dss)},
+	/*
+	 * Undocumented for now, since we anticipate an arena API in flux after
+	 * we cut the last 5-series release.
+	 */
+	{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
 	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
 	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
 	{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
@@ -2530,6 +2536,38 @@ label_return:
 	return ret;
 }
+static int
+arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+	int ret;
+	unsigned arena_ind;
+	MIB_UNSIGNED(arena_ind, 1);
+	arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+	if (arena == NULL) {
+		ret = EFAULT;
+		goto label_return;
+	}
+	if (oldp != NULL && oldlenp != NULL) {
+		size_t oldval = atomic_load_zu(
+		    &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
+		READ(oldval, size_t);
+	}
+	if (newp != NULL) {
+		if (newlen != sizeof(size_t)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
+		    *(size_t *)newp, ATOMIC_RELAXED);
+	}
+	ret = 0;
+label_return:
+	return ret;
+}
 static int
 arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
     void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
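
Since the handler pulls the arena index out of the MIB (MIB_UNSIGNED(arena_ind, 1)), repeated tuning can also go through jemalloc's mallctlnametomib/mallctlbymib interface. A small hypothetical sketch (the helper name and error handling are mine, not the commit's):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical helper: resolve the ctl name once, then patch the arena
 * index into mib[1] (the component the handler reads) for each call.
 */
static int
set_arena_oversize_threshold(unsigned arena_ind, size_t threshold) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("arena.0.oversize_threshold", mib, &miblen) != 0) {
		return -1;
	}
	mib[1] = (size_t)arena_ind;
	return mallctlbymib(mib, miblen, NULL, NULL, &threshold,
	    sizeof(threshold));
}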

@@ -983,8 +983,9 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 			edata = extent_try_coalesce_large(tsdn, pac, ehooks,
 			    ecache, edata, &coalesced, growing_retained);
 		} while (coalesced);
-		if (edata_size_get(edata) >= oversize_threshold &&
-		    extent_may_force_decay(pac)) {
+		if (edata_size_get(edata) >=
+		    atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
+		    && extent_may_force_decay(pac)) {
 			/* Shortcut to purge the oversize extent eagerly. */
 			malloc_mutex_unlock(tsdn, &ecache->mtx);
 			extent_maximally_purge(tsdn, pac, ehooks, edata);
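
Here the threshold moves from a global into a pac_t field read with a relaxed atomic load. A minimal standalone model (not jemalloc's code; the type and function names below are invented) of why relaxed ordering suffices: the value only tunes a purging heuristic, so the ctl writer and this reader never need to synchronize through it.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct shard {
	atomic_size_t oversize_threshold;	/* stands in for pac_t's field */
};

/* Writer side (the ctl handler): publish a new threshold. */
static void
shard_set_threshold(struct shard *s, size_t threshold) {
	atomic_store_explicit(&s->oversize_threshold, threshold,
	    memory_order_relaxed);
}

/* Reader side (extent_record): decide whether to purge a freed extent now. */
static bool
should_purge_eagerly(struct shard *s, size_t extent_size, bool may_force_decay) {
	size_t threshold = atomic_load_explicit(&s->oversize_threshold,
	    memory_order_relaxed);
	return extent_size >= threshold && may_force_decay;
}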

@@ -17,7 +17,8 @@ pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
     unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
-    nstime_t *cur_time, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+    nstime_t *cur_time, size_t oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms) {
 	/* This will change eventually, but for now it should hold. */
 	assert(base_ind_get(base) == ind);
 	if (edata_cache_init(&shard->edata_cache, base)) {
@@ -25,8 +26,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
 	}
 	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
-	    cur_time, dirty_decay_ms, muzzy_decay_ms, &stats->pac_stats,
-	    stats_mtx)) {
+	    cur_time, oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
+	    &stats->pac_stats, stats_mtx)) {
 		return true;
 	}

@@ -37,8 +37,9 @@ pac_decay_data_get(pac_t *pac, extent_state_t state,
 bool
 pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
-    edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
-    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
+    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
+    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
+    malloc_mutex_t *stats_mtx) {
 	unsigned ind = base_ind_get(base);
 	/*
 	 * Delay coalescing for dirty extents despite the disruptive effect on
@@ -73,6 +74,8 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
+	atomic_store_zu(&pac->oversize_threshold, oversize_threshold,
+	    ATOMIC_RELAXED);
 	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
 		return true;
 	}
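
Taken together: pac_init seeds the new pac_t atomic from its oversize_threshold argument, pa_shard_init simply forwards it, and arena_new (first hunk) passes the existing global oversize_threshold, so a fresh arena starts at the global default and can then be retuned individually through arena.<i>.oversize_threshold.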