HPA: add size-exclusion functionality.

I.e. only allowing allocations under or over certain sizes.
This commit is contained in:
David Goldblatt 2020-09-04 18:29:28 -07:00 committed by David Goldblatt
parent 484f04733e
commit 534504d4a7
11 changed files with 84 additions and 16 deletions

View File

@@ -59,6 +59,17 @@ struct hpa_shard_s {
* fragmentation avoidance measure.
*/
size_t ps_alloc_max;
/*
* What's the maximum size we'll try to allocate out of the shard at
* all?
*/
size_t small_max;
/*
* What's the minimum size for which we'll go straight to the global
* arena?
*/
size_t large_min;
/* The arena ind we're associated with. */
unsigned ind;
};
@@ -67,7 +78,7 @@ bool hpa_init(hpa_t *hpa, base_t *base, emap_t *emap,
edata_cache_t *edata_cache);
bool hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa,
edata_cache_t *edata_cache, unsigned ind, size_t ps_goal,
size_t ps_alloc_max);
size_t ps_alloc_max, size_t small_max, size_t large_min);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
/*

View File

@@ -15,6 +15,8 @@ extern bool opt_confirm_conf;
extern bool opt_hpa;
extern size_t opt_hpa_slab_goal;
extern size_t opt_hpa_slab_max_alloc;
extern size_t opt_hpa_small_max;
extern size_t opt_hpa_large_min;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;

View File

@@ -124,7 +124,7 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* that we can boot without worrying about the HPA, then turn it on in a0.
*/
bool pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
size_t ps_alloc_max);
size_t ps_alloc_max, size_t small_max, size_t large_min);
/*
* We stop using the HPA when custom extent hooks are installed, but still
* redirect deallocations to it.

View File

@@ -1528,7 +1528,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
*/
if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
if (pa_shard_enable_hpa(&arena->pa_shard, &arena_hpa_global,
opt_hpa_slab_goal, opt_hpa_slab_max_alloc)) {
opt_hpa_slab_goal, opt_hpa_slab_max_alloc,
opt_hpa_small_max, opt_hpa_large_min)) {
goto label_error;
}
}

View File

@@ -93,6 +93,8 @@ CTL_PROTO(opt_confirm_conf)
CTL_PROTO(opt_hpa)
CTL_PROTO(opt_hpa_slab_goal)
CTL_PROTO(opt_hpa_slab_max_alloc)
CTL_PROTO(opt_hpa_small_max)
CTL_PROTO(opt_hpa_large_min)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
@@ -348,7 +350,9 @@ static const ctl_named_node_t opt_node[] = {
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
{NAME("hpa"), CTL(opt_hpa)},
{NAME("hpa_slab_goal"), CTL(opt_hpa_slab_goal)},
{NAME("hpa_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
{NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
{NAME("hpa_small_max"), CTL(opt_hpa_small_max)},
{NAME("hpa_large_min"), CTL(opt_hpa_large_min)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
@@ -1833,6 +1837,8 @@ CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
CTL_RO_NL_GEN(opt_hpa_slab_goal, opt_hpa_slab_goal, size_t)
CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_slab_max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_small_max, opt_hpa_small_max, size_t)
CTL_RO_NL_GEN(opt_hpa_large_min, opt_hpa_large_min, size_t)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)

View File

@@ -50,7 +50,8 @@ hpa_init(hpa_t *hpa, base_t *base, emap_t *emap, edata_cache_t *edata_cache) {
bool
hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
unsigned ind, size_t ps_goal, size_t ps_alloc_max) {
unsigned ind, size_t ps_goal, size_t ps_alloc_max, size_t small_max,
size_t large_min) {
bool err;
err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
@@ -68,6 +69,8 @@ hpa_shard_init(hpa_shard_t *shard, hpa_t *hpa, edata_cache_t *edata_cache,
psset_init(&shard->psset);
shard->ps_goal = ps_goal;
shard->ps_alloc_max = ps_alloc_max;
shard->small_max = small_max;
shard->large_min = large_min;
/*
* Fill these in last, so that if an hpa_shard gets used despite
@@ -195,7 +198,7 @@ hpa_alloc_central(tsdn_t *tsdn, hpa_shard_t *shard, size_t size_min,
static edata_t *
hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
assert(size < shard->ps_alloc_max);
assert(size <= shard->ps_alloc_max);
bool err;
edata_t *edata = edata_cache_get(tsdn, shard->edata_cache);
@@ -257,16 +260,18 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero) {
assert((size & PAGE_MASK) == 0);
hpa_shard_t *shard = hpa_from_pai(self);
/* We don't handle alignment or zeroing for now. */
if (alignment > PAGE || zero) {
return NULL;
}
if (size > shard->small_max && size < shard->large_min) {
return NULL;
}
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
hpa_shard_t *shard = hpa_from_pai(self);
edata_t *edata;
if (size <= shard->ps_alloc_max) {
edata = hpa_alloc_psset(tsdn, shard, size);

View File

@@ -136,8 +136,10 @@ malloc_mutex_t arenas_lock;
/* The global hpa, and whether it's on. */
bool opt_hpa = false;
size_t opt_hpa_slab_goal = 512 * 1024;
size_t opt_hpa_slab_goal = 128 * 1024;
size_t opt_hpa_slab_max_alloc = 256 * 1024;
size_t opt_hpa_small_max = 32 * 1024;
size_t opt_hpa_large_min = 4 * 1024 * 1024;
/*
* Arenas that are used to service external requests. Not all elements of the
@@ -1493,6 +1495,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_SIZE_T(opt_hpa_slab_max_alloc,
"hpa_slab_max_alloc", PAGE, 512 * PAGE,
CONF_CHECK_MIN, CONF_CHECK_MAX, true)
CONF_HANDLE_SIZE_T(opt_hpa_small_max, "hpa_small_max",
PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true)
CONF_HANDLE_SIZE_T(opt_hpa_large_min, "hpa_large_min",
PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true)
if (CONF_MATCH("slab_sizes")) {
if (CONF_MATCH_VALUE("default")) {
@@ -1801,7 +1807,8 @@ malloc_init_hard_a0_locked() {
return true;
}
if (pa_shard_enable_hpa(&a0->pa_shard, &arena_hpa_global,
opt_hpa_slab_goal, opt_hpa_slab_max_alloc)) {
opt_hpa_slab_goal, opt_hpa_slab_max_alloc,
opt_hpa_small_max, opt_hpa_large_min)) {
return true;
}
}

View File

@@ -49,7 +49,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
bool
pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
size_t ps_alloc_max) {
size_t ps_alloc_max, size_t small_max, size_t large_min) {
ps_goal &= ~PAGE_MASK;
ps_alloc_max &= ~PAGE_MASK;
@@ -57,7 +57,7 @@ pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
ps_alloc_max = ps_goal;
}
if (hpa_shard_init(&shard->hpa_shard, hpa, &shard->edata_cache,
shard->ind, ps_goal, ps_alloc_max)) {
shard->ind, ps_goal, ps_alloc_max, small_max, large_min)) {
return true;
}
shard->ever_used_hpa = true;

View File

@@ -1098,6 +1098,8 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("hpa")
OPT_WRITE_SIZE_T("hpa_slab_goal")
OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
OPT_WRITE_SIZE_T("hpa_small_max")
OPT_WRITE_SIZE_T("hpa_large_min")
OPT_WRITE_CHAR_P("metadata_thp")
OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")

View File

@@ -8,6 +8,9 @@
#define PS_GOAL (128 * PAGE)
#define PS_ALLOC_MAX (64 * PAGE)
#define HPA_SMALL_MAX (200 * PAGE)
#define HPA_LARGE_MIN (300 * PAGE)
typedef struct test_data_s test_data_t;
struct test_data_s {
/*
@@ -57,7 +60,8 @@ create_test_data() {
assert_false(err, "");
err = hpa_shard_init(&test_data->shard, &test_data->hpa,
&test_data->shard_edata_cache, SHARD_IND, PS_GOAL, PS_ALLOC_MAX);
&test_data->shard_edata_cache, SHARD_IND, PS_GOAL, PS_ALLOC_MAX,
HPA_SMALL_MAX, HPA_LARGE_MIN);
assert_false(err, "");
return (hpa_shard_t *)test_data;
@@ -71,6 +75,31 @@ destroy_test_data(hpa_shard_t *shard) {
free(test_data);
}
/*
 * Exercise the shard's new size-exclusion boundaries, matching the check
 * added to hpa_alloc in this commit (refuse when
 * small_max < size < large_min): a request of exactly HPA_SMALL_MAX is
 * served, one page above it is refused (NULL), a request of exactly
 * HPA_LARGE_MIN is served, and one page below it is refused.
 */
TEST_BEGIN(test_small_max_large_min) {
/*
 * NOTE(review): restricted to 64-bit builds (LG_SIZEOF_PTR == 3) --
 * presumably the HPA_LARGE_MIN-sized (300-page) allocations below need
 * the larger address space; confirm.
 */
test_skip_if(LG_SIZEOF_PTR != 3);
hpa_shard_t *shard = create_test_data();
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
edata_t *edata;
/* Small max */
/* At the boundary: HPA_SMALL_MAX itself must still be accepted. */
edata = pai_alloc(tsdn, &shard->pai, HPA_SMALL_MAX, PAGE, false);
expect_ptr_not_null(edata, "Allocation of small max failed");
/* One page over small_max falls in the excluded gap -> NULL. */
edata = pai_alloc(tsdn, &shard->pai, HPA_SMALL_MAX + PAGE, PAGE, false);
expect_ptr_null(edata, "Allocation of larger than small max succeeded");
/* Large min */
/* At the boundary: HPA_LARGE_MIN itself must be accepted again. */
edata = pai_alloc(tsdn, &shard->pai, HPA_LARGE_MIN, PAGE, false);
expect_ptr_not_null(edata, "Allocation of large min failed");
/* One page under large_min is still in the excluded gap -> NULL. */
edata = pai_alloc(tsdn, &shard->pai, HPA_LARGE_MIN - PAGE, PAGE, false);
expect_ptr_null(edata,
"Allocation of smaller than large min succeeded");
destroy_test_data(shard);
}
TEST_END
typedef struct mem_contents_s mem_contents_t;
struct mem_contents_s {
uintptr_t my_addr;
@@ -164,10 +193,10 @@ TEST_BEGIN(test_stress) {
*/
if (operation == 0) {
npages_min = 1;
npages_max = SC_LARGE_MINCLASS / PAGE - 1;
npages_max = HPA_SMALL_MAX / PAGE;
} else {
npages_min = SC_LARGE_MINCLASS / PAGE;
npages_max = 5 * npages_min;
npages_min = HPA_LARGE_MIN / PAGE;
npages_max = HPA_LARGE_MIN / PAGE + 20;
}
size_t npages = npages_min + prng_range_zu(&prng_state,
npages_max - npages_min);
@@ -231,5 +260,6 @@ main(void) {
(void)mem_tree_reverse_iter;
(void)mem_tree_destroy;
return test_no_reentrancy(
test_small_max_large_min,
test_stress);
}

View File

@@ -164,6 +164,10 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(bool, retain, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(bool, hpa, always);
TEST_MALLCTL_OPT(size_t, hpa_slab_goal, always);
TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
TEST_MALLCTL_OPT(size_t, hpa_small_max, always);
TEST_MALLCTL_OPT(size_t, hpa_large_min, always);
TEST_MALLCTL_OPT(unsigned, narenas, always);
TEST_MALLCTL_OPT(const char *, percpu_arena, always);
TEST_MALLCTL_OPT(size_t, oversize_threshold, always);