Pull HPA options into a containing struct.

Currently that just means max_alloc, but we're about to add more. While we're
touching these lines anyway, tweak things to be more amenable to testing.
David Goldblatt 2020-12-08 16:33:39 -08:00 committed by David Goldblatt
parent bdb7307ff2
commit b3df80bc79
10 changed files with 52 additions and 28 deletions
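
The refactor below follows a common C options-struct pattern: loose parameters
become fields of a struct with a designated default, and consumers take a const
pointer and copy it by value. A minimal standalone sketch of the pattern — only
hpa_shard_opts_t, HPA_SHARD_OPTS_DEFAULT, and slab_max_alloc come from the diff
below; shard_init_sketch is illustrative:

    #include <stddef.h>

    typedef struct hpa_shard_opts_s hpa_shard_opts_t;
    struct hpa_shard_opts_s {
    	/* Largest size the shard will serve; more fields can be added later. */
    	size_t slab_max_alloc;
    };

    /* Field-name comments keep the positional initializer readable. */
    #define HPA_SHARD_OPTS_DEFAULT { \
    	/* slab_max_alloc */ \
    	64 * 1024 \
    }

    /* Consumers copy the struct by value, so callers can reuse or discard it. */
    static void
    shard_init_sketch(hpa_shard_opts_t *dst, const hpa_shard_opts_t *opts) {
    	*dst = *opts;
    }

    int
    main(void) {
    	hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
    	hpa_shard_opts_t shard_opts;
    	shard_init_sketch(&shard_opts, &opts);
    	return 0;
    }

Adding a future option then only touches the struct, the default macro, and the
code that reads the new field — not every signature in between.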

include/jemalloc/internal/hpa.h

@@ -2,7 +2,7 @@
 #define JEMALLOC_INTERNAL_HPA_H
 
 #include "jemalloc/internal/exp_grow.h"
-#include "jemalloc/internal/hpa_central.h"
+#include "jemalloc/internal/hpa_opts.h"
 #include "jemalloc/internal/pai.h"
 #include "jemalloc/internal/psset.h"
 
@@ -64,14 +64,6 @@ struct hpa_shard_s {
 
 	psset_t psset;
 
-	/*
-	 * The largest size we'll allocate out of the shard.  For those
-	 * allocations refused, the caller (in practice, the PA module) will
-	 * fall back to the more general (for now) PAC, which can always handle
-	 * any allocation request.
-	 */
-	size_t alloc_max;
-
 	/*
	 * How many grow operations have occurred.
	 *
@@ -93,6 +85,9 @@ struct hpa_shard_s {
 	unsigned ind;
 	emap_t *emap;
 
+	/* The configuration choices for this hpa shard. */
+	hpa_shard_opts_t opts;
+
 	/*
	 * How many pages have we started but not yet finished purging in this
	 * hpa shard.
@@ -113,7 +108,7 @@
  */
 bool hpa_supported();
 
 bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
-    edata_cache_t *edata_cache, unsigned ind, size_t alloc_max);
+    edata_cache_t *edata_cache, unsigned ind, const hpa_shard_opts_t *opts);
 void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
 void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,

include/jemalloc/internal/hpa_opts.h (new file)

@@ -0,0 +1,25 @@
+#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
+#define JEMALLOC_INTERNAL_HPA_OPTS_H
+
+/*
+ * This file is morally part of hpa.h, but is split out for header-ordering
+ * reasons.
+ */
+
+typedef struct hpa_shard_opts_s hpa_shard_opts_t;
+struct hpa_shard_opts_s {
+	/*
+	 * The largest size we'll allocate out of the shard.  For those
+	 * allocations refused, the caller (in practice, the PA module) will
+	 * fall back to the more general (for now) PAC, which can always handle
+	 * any allocation request.
+	 */
+	size_t slab_max_alloc;
+};
+
+#define HPA_SHARD_OPTS_DEFAULT { \
+	/* slab_max_alloc */ \
+	64 * 1024 \
+}
+
+#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
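
Because HPA_SHARD_OPTS_DEFAULT is a plain initializer, callers can take the
defaults and override individual fields — exactly what the test change at the
bottom of this diff does. A sketch of that idiom; the helper opts_with_max is
hypothetical, not part of the commit:

    #include "jemalloc/internal/hpa_opts.h"

    static hpa_shard_opts_t
    opts_with_max(size_t slab_max_alloc) {
    	/* Start from the defaults, then override a single field. */
    	hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
    	opts.slab_max_alloc = slab_max_alloc;
    	return opts;
    }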

include/jemalloc/internal/jemalloc_internal_externs.h

@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_EXTERNS_H
 
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/hpa_opts.h"
 #include "jemalloc/internal/tsd_types.h"
 #include "jemalloc/internal/nstime.h"
 
@@ -14,8 +15,7 @@ extern bool opt_abort_conf;
 extern bool opt_trust_madvise;
 extern bool opt_confirm_conf;
 extern bool opt_hpa;
-extern size_t opt_hpa_slab_max_alloc;
-
+extern hpa_shard_opts_t opt_hpa_opts;
 extern size_t opt_hpa_sec_max_alloc;
 extern size_t opt_hpa_sec_max_bytes;
 extern size_t opt_hpa_sec_nshards;

include/jemalloc/internal/pa.h

@@ -130,7 +130,7 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
  * This isn't exposed to users; we allow late enablement of the HPA shard so
  * that we can boot without worrying about the HPA, then turn it on in a0.
  */
-bool pa_shard_enable_hpa(pa_shard_t *shard, size_t alloc_max,
+bool pa_shard_enable_hpa(pa_shard_t *shard, const hpa_shard_opts_t *hpa_opts,
     size_t sec_nshards, size_t sec_alloc_max, size_t sec_bytes_max);
 /*
  * We stop using the HPA when custom extent hooks are installed, but still

src/arena.c

@@ -1480,8 +1480,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 */
 	if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
 		if (pa_shard_enable_hpa(&arena->pa_shard,
-		    opt_hpa_slab_max_alloc, opt_hpa_sec_nshards,
-		    opt_hpa_sec_max_alloc, opt_hpa_sec_max_bytes)) {
+		    &opt_hpa_opts, opt_hpa_sec_nshards, opt_hpa_sec_max_alloc,
+		    opt_hpa_sec_max_bytes)) {
 			goto label_error;
 		}
 	}

src/ctl.c

@@ -2090,7 +2090,7 @@ CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
 CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
 CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
 CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
-CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_slab_max_alloc, size_t)
+CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
 CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_max_alloc, size_t)
 CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_max_bytes, size_t)
 CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_nshards, size_t)
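
The mallctl name opt.hpa_slab_max_alloc is unchanged here; only the backing
storage moved into opt_hpa_opts. A sketch of reading it through the public API,
assuming a jemalloc build with the HPA compiled in:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
    	size_t v;
    	size_t sz = sizeof(v);
    	/* Read-only option; newp/newlen are unused. */
    	if (mallctl("opt.hpa_slab_max_alloc", &v, &sz, NULL, 0) == 0) {
    		printf("opt.hpa_slab_max_alloc: %zu\n", v);
    	}
    	return 0;
    }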

src/hpa.c

@@ -48,7 +48,7 @@ hpa_supported() {
 
 bool
 hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
-    edata_cache_t *edata_cache, unsigned ind, size_t alloc_max) {
+    edata_cache_t *edata_cache, unsigned ind, const hpa_shard_opts_t *opts) {
 	/* malloc_conf processing should have filtered out these cases. */
 	assert(hpa_supported());
 	bool err;
@@ -67,13 +67,14 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
 	shard->base = base;
 	edata_cache_small_init(&shard->ecs, edata_cache);
 	psset_init(&shard->psset);
-	shard->alloc_max = alloc_max;
 	shard->age_counter = 0;
 
 	shard->eden = NULL;
 	shard->eden_len = 0;
 	shard->ind = ind;
 	shard->emap = emap;
 
+	shard->opts = *opts;
+
 	shard->npending_purge = 0;
 	shard->stats.npurge_passes = 0;
@@ -489,7 +490,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
 
 static edata_t *
 hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
-	assert(size <= shard->alloc_max);
+	assert(size <= shard->opts.slab_max_alloc);
 	bool err;
 	bool oom;
 	edata_t *edata;
@@ -614,7 +615,7 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
 	if (alignment > PAGE || zero) {
 		return NULL;
 	}
-	if (size > shard->alloc_max) {
+	if (size > shard->opts.slab_max_alloc) {
 		return NULL;
 	}

src/jemalloc.c

@@ -144,7 +144,7 @@ malloc_mutex_t arenas_lock;
 
 /* The global hpa, and whether it's on. */
 bool opt_hpa = false;
-size_t opt_hpa_slab_max_alloc = 256 * 1024;
+hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
 
 size_t opt_hpa_sec_max_alloc = 32 * 1024;
 /* These settings correspond to a maximum of 1MB cached per arena. */
@@ -1410,8 +1410,8 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			    CONF_CHECK_MIN, CONF_CHECK_MAX,
 			    true);
 			CONF_HANDLE_BOOL(opt_hpa, "hpa")
-			CONF_HANDLE_SIZE_T(opt_hpa_slab_max_alloc,
-			    "hpa_slab_max_alloc", PAGE, 512 * PAGE,
+			CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
+			    "hpa_slab_max_alloc", PAGE, HUGEPAGE,
 			    CONF_CHECK_MIN, CONF_CHECK_MAX, true);
 
 			CONF_HANDLE_SIZE_T(opt_hpa_sec_max_alloc, "hpa_sec_max_alloc",
@@ -1717,7 +1717,7 @@ malloc_init_hard_a0_locked() {
 			opt_hpa = false;
 		}
 	} else if (opt_hpa) {
-		if (pa_shard_enable_hpa(&a0->pa_shard, opt_hpa_slab_max_alloc,
+		if (pa_shard_enable_hpa(&a0->pa_shard, &opt_hpa_opts,
 		    opt_hpa_sec_nshards, opt_hpa_sec_max_alloc,
 		    opt_hpa_sec_max_bytes)) {
 			return true;
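
Note the conf range tightening from 512 * PAGE to HUGEPAGE alongside the
storage change. The option is still set the same way at startup, for example
through the malloc_conf symbol that jemalloc reads at initialization; a sketch
(the 2097152 value is illustrative and assumes a 2 MiB HUGEPAGE):

    /* Compiled into the application; jemalloc picks it up at init time. */
    const char *malloc_conf = "hpa:true,hpa_slab_max_alloc:2097152";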

src/pa.c

@@ -49,10 +49,10 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
 }
 
 bool
-pa_shard_enable_hpa(pa_shard_t *shard, size_t alloc_max, size_t sec_nshards,
-    size_t sec_alloc_max, size_t sec_bytes_max) {
+pa_shard_enable_hpa(pa_shard_t *shard, const hpa_shard_opts_t *hpa_opts,
+    size_t sec_nshards, size_t sec_alloc_max, size_t sec_bytes_max) {
 	if (hpa_shard_init(&shard->hpa_shard, shard->emap, shard->base,
-	    &shard->edata_cache, shard->ind, alloc_max)) {
+	    &shard->edata_cache, shard->ind, hpa_opts)) {
 		return true;
 	}
 	if (sec_init(&shard->hpa_sec, &shard->hpa_shard.pai, sec_nshards,

test/unit/hpa.c

@@ -37,9 +37,12 @@ create_test_data() {
 	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
 	assert_false(err, "");
 
+	hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
+	opts.slab_max_alloc = ALLOC_MAX;
+
 	err = hpa_shard_init(&test_data->shard, &test_data->emap,
 	    test_data->base, &test_data->shard_edata_cache, SHARD_IND,
-	    ALLOC_MAX);
+	    &opts);
 	assert_false(err, "");
 
 	return (hpa_shard_t *)test_data;