Add a per-arena oversize_threshold.

This lets manual arenas trade off memory and CPU the way auto arenas do.
David Goldblatt
2020-11-11 13:34:43 -08:00
committed by David Goldblatt
parent 4ca3d91e96
commit cf2549a149
10 changed files with 194 additions and 12 deletions
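
The user-facing knob isn't visible in the header hunks below; based on the commit title and jemalloc's existing global opt.oversize_threshold option, the ctl plumbing in this commit most likely exposes the per-arena setting as an arena.<i>.oversize_threshold mallctl. A sketch of tuning a manual arena under that assumption:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Create a manual arena. */
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return 1;
	}

	/*
	 * Purge extents of 4 MiB and up eagerly in this arena.  The mallctl
	 * name is an assumption inferred from the commit title; it is not
	 * shown in the hunks below.
	 */
	char name[64];
	snprintf(name, sizeof(name), "arena.%u.oversize_threshold", arena_ind);
	size_t threshold = (size_t)4 << 20;
	if (mallctl(name, NULL, NULL, &threshold, sizeof(threshold)) != 0) {
		return 1;
	}

	/* Allocations bound to this arena now use the new threshold. */
	void *p = mallocx((size_t)8 << 20, MALLOCX_ARENA(arena_ind));
	dallocx(p, MALLOCX_ARENA(arena_ind));
	return 0;
}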

include/jemalloc/internal/pa.h

@@ -123,7 +123,8 @@ pa_shard_ehooks_get(pa_shard_t *shard) {
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
     unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
-    nstime_t *cur_time, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
+    nstime_t *cur_time, size_t oversize_threshold, ssize_t dirty_decay_ms,
+    ssize_t muzzy_decay_ms);
 /*
  * This isn't exposed to users; we allow late enablement of the HPA shard so
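
The matching definition in src/pa.c is not part of this extract; most plausibly it just threads the new argument through to pac_init (whose new signature appears in the next file). A sketch of that forwarding, with the rest of the shard setup and the exact member names assumed rather than quoted:

bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
    unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
    nstime_t *cur_time, size_t oversize_threshold, ssize_t dirty_decay_ms,
    ssize_t muzzy_decay_ms) {
	/* ... other shard setup elided ... */
	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
	    cur_time, oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
	    &stats->pac_stats, stats_mtx)) {
		return true;
	}
	/* ... */
	return false;
}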

include/jemalloc/internal/pac.h

@@ -98,6 +98,9 @@ struct pac_s {
 	exp_grow_t exp_grow;
 	malloc_mutex_t grow_mtx;
 
+	/* How large extents should be before getting auto-purged. */
+	atomic_zu_t oversize_threshold;
+
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
 	 * transitions.
@@ -115,8 +118,9 @@
 };
 
 bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
-    edata_cache_t *edata_cache, nstime_t *cur_time, ssize_t dirty_decay_ms,
-    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx);
+    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
+    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
+    malloc_mutex_t *stats_mtx);
 bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
     size_t *new_limit);
 void pac_stats_merge(tsdn_t *tsdn, pac_t *pac, pac_stats_t *pac_stats_out,
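
As for how the new atomic field gets used downstream of these declarations (not shown in this extract): pac_init presumably seeds it once with atomic_store_zu(&pac->oversize_threshold, oversize_threshold, ATOMIC_RELAXED), after which the deallocation path can compare extent sizes against it locklessly. A sketch of such a reader; the helper name is hypothetical, while atomic_load_zu and ATOMIC_RELAXED are jemalloc's own accessors:

static inline bool
pac_extent_oversized(pac_t *pac, size_t size) {
	/*
	 * Extents at least this large are purged eagerly on deallocation
	 * rather than waiting for decay-based purging, trading CPU for a
	 * smaller resident set.
	 */
	return size >= atomic_load_zu(&pac->oversize_threshold,
	    ATOMIC_RELAXED);
}

Keeping the threshold in an atomic_zu_t rather than a plain size_t is what makes the late, per-arena mallctl writes safe against concurrent readers without taking a mutex on every deallocation.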