HPA: add size-exclusion functionality.

I.e., only allow allocations at or below a small-size maximum (hpa_small_max) or at or above a large-size minimum (hpa_large_min); in-between sizes are refused by the HPA and fall back to the non-HPA path.
Author:    David Goldblatt
Date:      2020-09-04 18:29:28 -07:00
Committer: David Goldblatt
Parent:    484f04733e
Commit:    534504d4a7
11 changed files with 84 additions and 16 deletions
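
For orientation, here is a minimal sketch (not part of the commit) of the gate this change adds. The helper name hpa_size_eligible is hypothetical; only the parameter semantics come from the diff below.

#include <stdbool.h>
#include <stddef.h>

/*
 * A request is eligible for the HPA only if it is at most small_max or
 * at least large_min; anything in between is refused and handled by the
 * non-HPA path.
 */
static bool
hpa_size_eligible(size_t size, size_t small_max, size_t large_min) {
    return !(size > small_max && size < large_min);
}

With the defaults set in src/jemalloc.c below (small_max = 32 KiB, large_min = 4 MiB), a 16 KiB request passes, a 64 KiB request is refused, and an 8 MiB request passes.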

src/arena.c

@@ -1528,7 +1528,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
      */
     if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
         if (pa_shard_enable_hpa(&arena->pa_shard, &arena_hpa_global,
-            opt_hpa_slab_goal, opt_hpa_slab_max_alloc)) {
+            opt_hpa_slab_goal, opt_hpa_slab_max_alloc,
+            opt_hpa_small_max, opt_hpa_large_min)) {
             goto label_error;
         }
     }

src/ctl.c

@@ -93,6 +93,8 @@ CTL_PROTO(opt_confirm_conf)
 CTL_PROTO(opt_hpa)
 CTL_PROTO(opt_hpa_slab_goal)
 CTL_PROTO(opt_hpa_slab_max_alloc)
+CTL_PROTO(opt_hpa_small_max)
+CTL_PROTO(opt_hpa_large_min)
 CTL_PROTO(opt_metadata_thp)
 CTL_PROTO(opt_retain)
 CTL_PROTO(opt_dss)
@@ -348,7 +350,9 @@ static const ctl_named_node_t opt_node[] = {
     {NAME("confirm_conf"), CTL(opt_confirm_conf)},
     {NAME("hpa"), CTL(opt_hpa)},
     {NAME("hpa_slab_goal"), CTL(opt_hpa_slab_goal)},
-    {NAME("hpa_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
+    {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
+    {NAME("hpa_small_max"), CTL(opt_hpa_small_max)},
+    {NAME("hpa_large_min"), CTL(opt_hpa_large_min)},
     {NAME("metadata_thp"), CTL(opt_metadata_thp)},
     {NAME("retain"), CTL(opt_retain)},
     {NAME("dss"), CTL(opt_dss)},
@@ -1833,6 +1837,8 @@ CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
 CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
 CTL_RO_NL_GEN(opt_hpa_slab_goal, opt_hpa_slab_goal, size_t)
 CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_slab_max_alloc, size_t)
+CTL_RO_NL_GEN(opt_hpa_small_max, opt_hpa_small_max, size_t)
+CTL_RO_NL_GEN(opt_hpa_large_min, opt_hpa_large_min, size_t)
 CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
     const char *)
 CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
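
The CTL_PROTO / opt_node / CTL_RO_NL_GEN additions above expose the two knobs read-only under the opt.* mallctl namespace. A small usage sketch, assuming a jemalloc build that includes this commit (mallctl is the standard jemalloc control API):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    size_t small_max, large_min;
    size_t sz = sizeof(size_t);
    /* Read the new read-only options; this fails on older builds. */
    if (mallctl("opt.hpa_small_max", &small_max, &sz, NULL, 0) == 0 &&
        mallctl("opt.hpa_large_min", &large_min, &sz, NULL, 0) == 0) {
        printf("hpa_small_max=%zu hpa_large_min=%zu\n", small_max,
            large_min);
    }
    return 0;
}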

src/hpa.c

@@ -50,7 +50,8 @@ hpa_init(hpa_t *hpa, base_t *base, emap_t *emap, edata_cache_t *edata_cache) {

 bool
 hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
-    unsigned ind, size_t ps_goal, size_t ps_alloc_max) {
+    unsigned ind, size_t ps_goal, size_t ps_alloc_max, size_t small_max,
+    size_t large_min) {
     bool err;
     err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
         WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
@@ -68,6 +69,8 @@ hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
     psset_init(&shard->psset);
     shard->ps_goal = ps_goal;
     shard->ps_alloc_max = ps_alloc_max;
+    shard->small_max = small_max;
+    shard->large_min = large_min;

     /*
      * Fill these in last, so that if an hpa_shard gets used despite
@@ -195,7 +198,7 @@ hpa_alloc_central(tsdn_t *tsdn, hpa_shard_t *shard, size_t size_min,

 static edata_t *
 hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
-    assert(size < shard->ps_alloc_max);
+    assert(size <= shard->ps_alloc_max);

     bool err;
     edata_t *edata = edata_cache_get(tsdn, shard->edata_cache);
@@ -257,16 +260,18 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t alignment, bool zero) {
     assert((size & PAGE_MASK) == 0);
+    hpa_shard_t *shard = hpa_from_pai(self);

     /* We don't handle alignment or zeroing for now. */
     if (alignment > PAGE || zero) {
         return NULL;
     }
+    if (size > shard->small_max && size < shard->large_min) {
+        return NULL;
+    }

     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);

-    hpa_shard_t *shard = hpa_from_pai(self);
-
     edata_t *edata;
     if (size <= shard->ps_alloc_max) {
         edata = hpa_alloc_psset(tsdn, shard, size);
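
Putting the new gate together with the existing ps_alloc_max split, the routing in hpa_alloc() works roughly as in this hypothetical helper (illustration only; the else branch of the truncated hunk presumably reaches hpa_alloc_central(), per the hunk header above):

typedef enum {
    ROUTE_REJECT,   /* falls back to the non-HPA path */
    ROUTE_PSSET,    /* small enough for a pageslab */
    ROUTE_CENTRAL   /* HPA-eligible, but too big for the psset */
} hpa_route_t;

static hpa_route_t
hpa_route(size_t size, size_t small_max, size_t large_min,
    size_t ps_alloc_max) {
    if (size > small_max && size < large_min) {
        return ROUTE_REJECT;
    }
    return size <= ps_alloc_max ? ROUTE_PSSET : ROUTE_CENTRAL;
}

Under the defaults (small_max = 32 KiB, large_min = 4 MiB, ps_alloc_max = 256 KiB), every admitted size is also unambiguously routed: admitted small sizes are at most ps_alloc_max, admitted large ones exceed it.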

src/jemalloc.c

@@ -136,8 +136,10 @@ malloc_mutex_t arenas_lock;

 /* The global hpa, and whether it's on. */
 bool opt_hpa = false;
-size_t opt_hpa_slab_goal = 512 * 1024;
+size_t opt_hpa_slab_goal = 128 * 1024;
 size_t opt_hpa_slab_max_alloc = 256 * 1024;
+size_t opt_hpa_small_max = 32 * 1024;
+size_t opt_hpa_large_min = 4 * 1024 * 1024;

 /*
  * Arenas that are used to service external requests. Not all elements of the
@@ -1493,6 +1495,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
             CONF_HANDLE_SIZE_T(opt_hpa_slab_max_alloc,
                 "hpa_slab_max_alloc", PAGE, 512 * PAGE,
                 CONF_CHECK_MIN, CONF_CHECK_MAX, true)
+            CONF_HANDLE_SIZE_T(opt_hpa_small_max, "hpa_small_max",
+                PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true)
+            CONF_HANDLE_SIZE_T(opt_hpa_large_min, "hpa_large_min",
+                PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true)
             if (CONF_MATCH("slab_sizes")) {
                 if (CONF_MATCH_VALUE("default")) {
@@ -1801,7 +1807,8 @@ malloc_init_hard_a0_locked() {
             return true;
         }
         if (pa_shard_enable_hpa(&a0->pa_shard, &arena_hpa_global,
-            opt_hpa_slab_goal, opt_hpa_slab_max_alloc)) {
+            opt_hpa_slab_goal, opt_hpa_slab_max_alloc,
+            opt_hpa_small_max, opt_hpa_large_min)) {
             return true;
         }
     }
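
At runtime the two knobs are set the same way as other opt.* options, e.g. through the malloc_conf string that malloc_conf_init_helper() parses (values in bytes; per the CONF_CHECK_MIN entries above, each must be at least PAGE). The numeric values here are arbitrary:

/* Example configuration string; compiled into the application. */
const char *malloc_conf = "hpa:true,hpa_small_max:65536,hpa_large_min:2097152";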

src/pa.c

@@ -49,7 +49,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,

 bool
 pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
-    size_t ps_alloc_max) {
+    size_t ps_alloc_max, size_t small_max, size_t large_min) {
     ps_goal &= ~PAGE_MASK;
     ps_alloc_max &= ~PAGE_MASK;
@@ -57,7 +57,7 @@ pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
         ps_alloc_max = ps_goal;
     }
     if (hpa_shard_init(&shard->hpa_shard, hpa, &shard->edata_cache,
-        shard->ind, ps_goal, ps_alloc_max)) {
+        shard->ind, ps_goal, ps_alloc_max, small_max, large_min)) {
         return true;
     }
     shard->ever_used_hpa = true;
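
Note that pa_shard_enable_hpa() floors ps_goal and ps_alloc_max to page multiples with &= ~PAGE_MASK before passing them to hpa_shard_init(); small_max and large_min are passed through unmodified. A quick check of that rounding arithmetic, assuming 4 KiB pages (so PAGE_MASK == 0xfff):

#include <assert.h>
#include <stddef.h>

int
main(void) {
    size_t page_mask = 4096 - 1;    /* assumption: PAGE == 4096 */
    size_t ps_goal = 130000;
    ps_goal &= ~page_mask;          /* round down to a page boundary */
    assert(ps_goal == 126976);      /* 31 * 4096 */
    return 0;
}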

src/stats.c

@@ -1098,6 +1098,8 @@ stats_general_print(emitter_t *emitter) {
     OPT_WRITE_BOOL("hpa")
     OPT_WRITE_SIZE_T("hpa_slab_goal")
     OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
+    OPT_WRITE_SIZE_T("hpa_small_max")
+    OPT_WRITE_SIZE_T("hpa_large_min")
     OPT_WRITE_CHAR_P("metadata_thp")
     OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
     OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")