From 79f81a3732c434e9b648561bf8ab6ab6bf74385a Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Wed, 9 Dec 2020 15:55:17 -0800
Subject: [PATCH] HPA: Make dirty_mult configurable.

---
 include/jemalloc/internal/hpa_opts.h | 11 ++++++++++-
 src/ctl.c                            |  7 +++++++
 src/hpa.c                            |  7 ++++++-
 src/jemalloc.c                       | 18 ++++++++++++++++++
 src/stats.c                          | 21 ++++++++++++++++++++-
 5 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/include/jemalloc/internal/hpa_opts.h b/include/jemalloc/internal/hpa_opts.h
index 0ed1c417..5ff00725 100644
--- a/include/jemalloc/internal/hpa_opts.h
+++ b/include/jemalloc/internal/hpa_opts.h
@@ -1,6 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
 #define JEMALLOC_INTERNAL_HPA_OPTS_H
 
+#include "jemalloc/internal/fxp.h"
+
 /*
  * This file is morally part of hpa.h, but is split out for header-ordering
  * reasons.
@@ -25,6 +27,11 @@ struct hpa_shard_opts_s {
 	 * dehugification_threshold, we force dehugify it.
 	 */
 	size_t dehugification_threshold;
+	/*
+	 * The HPA purges whenever the number of pages exceeds dirty_mult *
+	 * active_pages. This may be set to (fxp_t)-1 to disable purging.
+	 */
+	fxp_t dirty_mult;
 };
 
 #define HPA_SHARD_OPTS_DEFAULT { \
@@ -33,7 +40,9 @@ struct hpa_shard_opts_s {
 	/* hugification_threshold */ \
 	HUGEPAGE * 95 / 100, \
 	/* dehugification_threshold */ \
-	HUGEPAGE * 20 / 100 \
+	HUGEPAGE * 20 / 100, \
+	/* dirty_mult */ \
+	FXP_INIT_PERCENT(25) \
 }
 
 #endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
diff --git a/src/ctl.c b/src/ctl.c
index ba667b5b..1c5e32ba 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -96,6 +96,7 @@ CTL_PROTO(opt_hpa)
 CTL_PROTO(opt_hpa_slab_max_alloc)
 CTL_PROTO(opt_hpa_hugification_threshold)
 CTL_PROTO(opt_hpa_dehugification_threshold)
+CTL_PROTO(opt_hpa_dirty_mult)
 CTL_PROTO(opt_hpa_sec_max_alloc)
 CTL_PROTO(opt_hpa_sec_max_bytes)
 CTL_PROTO(opt_hpa_sec_nshards)
@@ -402,6 +403,7 @@ static const ctl_named_node_t opt_node[] = {
 	    CTL(opt_hpa_hugification_threshold)},
 	{NAME("hpa_dehugification_threshold"),
 	    CTL(opt_hpa_dehugification_threshold)},
+	{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
 	{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
 	{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
 	{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
@@ -2101,6 +2103,11 @@ CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
     opt_hpa_opts.hugification_threshold, size_t)
 CTL_RO_NL_GEN(opt_hpa_dehugification_threshold,
     opt_hpa_opts.dehugification_threshold, size_t)
+/*
+ * This will have to change before we publicly document this option; fxp_t and
+ * its representation are internal implementation details.
+ */
+CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
 CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_max_alloc, size_t)
 CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_max_bytes, size_t)
 CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_nshards, size_t)
diff --git a/src/hpa.c b/src/hpa.c
index 0e704b8c..3c706cbf 100644
--- a/src/hpa.c
+++ b/src/hpa.c
@@ -147,13 +147,18 @@ hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
 
 static bool
 hpa_should_purge(hpa_shard_t *shard) {
+	if (shard->opts.dirty_mult == (fxp_t)-1) {
+		return false;
+	}
 	size_t adjusted_ndirty = psset_ndirty(&shard->psset) -
 	    shard->npending_purge;
 	/*
 	 * Another simple static check; purge whenever dirty exceeds 25% of
 	 * active.
 	 */
-	return adjusted_ndirty > psset_nactive(&shard->psset) / 4;
+	size_t max_ndirty = fxp_mul_frac(psset_nactive(&shard->psset),
+	    shard->opts.dirty_mult);
+	return adjusted_ndirty > max_ndirty;
 }
 
 static void
diff --git a/src/jemalloc.c b/src/jemalloc.c
index fe8e09e6..c2c75fa5 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1458,6 +1458,24 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 				CONF_CONTINUE;
 			}
 
+			if (CONF_MATCH("hpa_dirty_mult")) {
+				if (CONF_MATCH_VALUE("-1")) {
+					opt_hpa_opts.dirty_mult = (fxp_t)-1;
+					CONF_CONTINUE;
+				}
+				fxp_t ratio;
+				char *end;
+				bool err = fxp_parse(&ratio, v,
+				    &end);
+				if (err || (size_t)(end - v) != vlen) {
+					CONF_ERROR("Invalid conf value",
+					    k, klen, v, vlen);
+				} else {
+					opt_hpa_opts.dirty_mult = ratio;
+				}
+				CONF_CONTINUE;
+			}
+
 			CONF_HANDLE_SIZE_T(opt_hpa_sec_max_alloc, "hpa_sec_max_alloc",
 			    PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
 			CONF_HANDLE_SIZE_T(opt_hpa_sec_max_bytes, "hpa_sec_max_bytes",
diff --git a/src/stats.c b/src/stats.c
index 7a0f20bf..1a7e6e4e 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -4,6 +4,7 @@
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/fxp.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mutex_prof.h"
 #include "jemalloc/internal/prof_stats.h"
@@ -1375,13 +1376,14 @@ stats_general_print(emitter_t *emitter) {
 	uint64_t u64v;
 	int64_t i64v;
 	ssize_t ssv, ssv2;
-	size_t sv, bsz, usz, i64sz, ssz, sssz, cpsz;
+	size_t sv, bsz, usz, u32sz, i64sz, ssz, sssz, cpsz;
 
 	bsz = sizeof(bool);
 	usz = sizeof(unsigned);
 	ssz = sizeof(size_t);
 	sssz = sizeof(ssize_t);
 	cpsz = sizeof(const char *);
+	u32sz = sizeof(uint32_t);
 	i64sz = sizeof(int64_t);
 
 	CTL_GET("version", &cpv, const char *);
@@ -1466,6 +1468,23 @@ stats_general_print(emitter_t *emitter) {
 	OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
 	OPT_WRITE_SIZE_T("hpa_hugification_threshold")
 	OPT_WRITE_SIZE_T("hpa_dehugification_threshold")
+	if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
+	    == 0) {
+		/*
+		 * We cheat a little and "know" the secret meaning of this
+		 * representation.
+		 */
+		if (u32v == (uint32_t)-1) {
+			emitter_kv(emitter, "hpa_dirty_mult",
+			    "opt.hpa_dirty_mult", emitter_type_string, "-1");
+		} else {
+			char buf[FXP_BUF_SIZE];
+			fxp_print(u32v, buf);
+			const char *bufp = buf;
+			emitter_kv(emitter, "hpa_dirty_mult",
+			    "opt.hpa_dirty_mult", emitter_type_string, &bufp);
+		}
+	}
 	OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
 	OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
 	OPT_WRITE_SIZE_T("hpa_sec_nshards")