From 0b526ff94da7e59aa947a4d3529b2376794f8b01 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Mon, 13 Feb 2012 18:04:26 -0800
Subject: [PATCH] Remove the opt.lg_prof_tcmax option.

Remove the opt.lg_prof_tcmax option and hard-code a cache size of 1024.
This setting is something that users just shouldn't have to worry about.
If lock contention actually ends up being a problem, the simple solution
available to the user is to reduce sampling frequency.
---
 doc/jemalloc.xml.in              | 26 ++------------------------
 include/jemalloc/internal/prof.h |  5 +++--
 src/ctl.c                        |  5 +----
 src/jemalloc.c                   |  2 --
 src/prof.c                       |  8 ++------
 src/stats.c                      | 12 ------------
 6 files changed, 8 insertions(+), 50 deletions(-)

diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 4c7023b5..2e5f10e3 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -938,8 +938,6 @@ malloc_conf = "xmalloc:true";]]>
         option for probabilistic sampling control.  See the
         opt.prof_accum
         option for control of cumulative sample reporting.  See the
-        opt.lg_prof_tcmax
-        option for control of per thread backtrace caching.  See the
         opt.lg_prof_interval
         option for information on interval-triggered profile dumping, and the
         opt.prof_gdump
@@ -1017,28 +1015,8 @@ malloc_conf = "xmalloc:true";]]>
         dumps enabled/disabled.  If this option is enabled, every unique
         backtrace must be stored for the duration of execution.  Depending on
         the application, this can impose a large memory overhead, and the
-        cumulative counts are not always of interest.  See the
-        opt.lg_prof_tcmax
-        option for control of per thread backtrace caching, which has important
-        interactions.  This option is enabled by default.
-
-
-
-
-        opt.lg_prof_tcmax
-        (ssize_t)
-        r-
-        []
-
-        Maximum per thread backtrace cache (log base 2) used
-        for heap profiling.  A backtrace can only be discarded if the
-        opt.prof_accum
-        option is disabled, and no thread caches currently refer to the
-        backtrace.  Therefore, a backtrace cache limit should be imposed if the
-        intention is to limit how much memory is used by backtraces.  By
-        default, no limit is imposed (encoded as -1).
-
+        cumulative counts are not always of interest.  This option is enabled
+        by default.
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 98f96546..ad8bcd2c 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -12,7 +12,9 @@ typedef struct prof_tdata_s prof_tdata_t;
 #define	LG_PROF_BT_MAX_DEFAULT	7
 #define	LG_PROF_SAMPLE_DEFAULT	0
 #define	LG_PROF_INTERVAL_DEFAULT	-1
-#define	LG_PROF_TCMAX_DEFAULT	-1
+
+/* Maximum number of backtraces to store in each per thread LRU cache. */
+#define	PROF_TCMAX		1024
 
 /*
  * Hard limit on stack backtrace depth.  Note that the version of
@@ -167,7 +169,6 @@ extern ssize_t	opt_lg_prof_interval;	/* lg(prof_interval). */
 extern bool	opt_prof_gdump;		/* High-water memory dumping. */
 extern bool	opt_prof_leak;		/* Dump leak summary at exit. */
 extern bool	opt_prof_accum;		/* Report cumulative bytes. */
-extern ssize_t	opt_lg_prof_tcmax;	/* lg(max per thread bactrace cache) */
 extern char	opt_prof_prefix[PATH_MAX + 1];
 
 /*
diff --git a/src/ctl.c b/src/ctl.c
index e33ce67d..12b41857 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -80,7 +80,6 @@ CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
-CTL_PROTO(opt_lg_prof_tcmax)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
@@ -222,8 +221,7 @@ static const ctl_node_t opt_node[] = {
 	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
 	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
 	{NAME("prof_leak"),		CTL(opt_prof_leak)},
-	{NAME("prof_accum"),		CTL(opt_prof_accum)},
-	{NAME("lg_prof_tcmax"),		CTL(opt_lg_prof_tcmax)}
+	{NAME("prof_accum"),		CTL(opt_prof_accum)}
 };
 
 static const ctl_node_t arenas_bin_i_node[] = {
@@ -1133,7 +1131,6 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval,
     ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
 
 /******************************************************************************/
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 796c8158..d2a6009f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -603,8 +603,6 @@ malloc_conf_init(void)
 			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
 			    (sizeof(uint64_t) << 3) - 1)
 			CONF_HANDLE_BOOL(prof_accum)
-			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
-			    (sizeof(size_t) << 3) - 1)
 			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
 			    (sizeof(uint64_t) << 3) - 1)
 			CONF_HANDLE_BOOL(prof_gdump)
diff --git a/src/prof.c b/src/prof.c
index 113cf15a..a4012f04 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -22,7 +22,6 @@ ssize_t	opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool	opt_prof_gdump = false;
 bool	opt_prof_leak = false;
 bool	opt_prof_accum = true;
-ssize_t	opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
 char	opt_prof_prefix[PATH_MAX + 1];
 
 uint64_t	prof_interval;
@@ -519,8 +518,7 @@ prof_lookup(prof_bt_t *bt)
 		prof_leave();
 
 		/* Link a prof_thd_cnt_t into ctx for this thread. */
-		if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt)
-		    == (ZU(1) << opt_lg_prof_tcmax)) {
+		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
 			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
 			/*
 			 * Flush the least recently used cnt in order to keep
@@ -535,9 +533,7 @@ prof_lookup(prof_bt_t *bt)
 			prof_ctx_merge(ret.p->ctx, ret.p);
 			/* ret can now be re-used. */
 		} else {
-			assert(opt_lg_prof_tcmax < 0 ||
-			    ckh_count(&prof_tdata->bt2cnt) < (ZU(1) <<
-			    opt_lg_prof_tcmax));
+			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
 			/* Allocate and partially initialize a new cnt. */
 			ret.v = imalloc(sizeof(prof_thr_cnt_t));
 			if (ret.p == NULL) {
diff --git a/src/stats.c b/src/stats.c
index 1e907823..86a48c60 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -515,7 +515,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		OPT_WRITE_BOOL(prof_active)
 		OPT_WRITE_SSIZE_T(lg_prof_sample)
 		OPT_WRITE_BOOL(prof_accum)
-		OPT_WRITE_SSIZE_T(lg_prof_tcmax)
 		OPT_WRITE_SSIZE_T(lg_prof_interval)
 		OPT_WRITE_BOOL(prof_gdump)
 		OPT_WRITE_BOOL(prof_leak)
@@ -622,17 +621,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			write_cb(cbopaque, u2s((1U << sv), 10, s));
 			write_cb(cbopaque, "\n");
 
-			CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
-			write_cb(cbopaque,
-			    "Maximum per thread backtrace cache: ");
-			if (ssv >= 0) {
-				write_cb(cbopaque, u2s((1U << ssv), 10, s));
-				write_cb(cbopaque, " (2^");
-				write_cb(cbopaque, u2s(ssv, 10, s));
-				write_cb(cbopaque, ")\n");
-			} else
-				write_cb(cbopaque, "N/A\n");
-
 			CTL_GET("opt.lg_prof_sample", &sv, size_t);
 			write_cb(cbopaque, "Average profile sample interval: ");
 			write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
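
The prof.c hunk above is the heart of the change: with the option gone,
prof_lookup() caps each thread's backtrace cache at a fixed PROF_TCMAX
entries and, when the cap is hit, flushes the least recently used counter
back into its ctx (via prof_ctx_merge()) before reusing the slot.  The
following standalone C program sketches that fixed-capacity LRU eviction
pattern.  It is illustrative only; every name in it (cache_t, entry_t,
cache_get_slot, merge_into_global) is hypothetical and simplified, not
jemalloc's API.

/*
 * Illustrative sketch only -- not jemalloc source.  A fixed-capacity LRU
 * cache: at capacity, the least recently used entry is merged back into
 * shared state (cf. prof_ctx_merge()) and its slot is reused, so residency
 * never exceeds CACHE_MAX (analogous to PROF_TCMAX).
 */
#include <stdio.h>
#include <stdlib.h>

#define	CACHE_MAX	1024	/* analogous to PROF_TCMAX */

typedef struct entry_s entry_t;
struct entry_s {
	entry_t	*prev;		/* toward least recently used */
	entry_t	*next;		/* toward most recently used */
	long	count;		/* stand-in for the cached counters */
};

typedef struct {
	size_t	nentries;
	entry_t	*lru;		/* least recently used */
	entry_t	*mru;		/* most recently used */
} cache_t;

static void
cache_unlink(cache_t *c, entry_t *e)
{

	if (e->prev != NULL)
		e->prev->next = e->next;
	else
		c->lru = e->next;
	if (e->next != NULL)
		e->next->prev = e->prev;
	else
		c->mru = e->prev;
}

static void
cache_link_mru(cache_t *c, entry_t *e)
{

	e->next = NULL;
	e->prev = c->mru;
	if (c->mru != NULL)
		c->mru->next = e;
	else
		c->lru = e;
	c->mru = e;
}

/* Stand-in for prof_ctx_merge(): fold evicted counters into shared state. */
static void
merge_into_global(entry_t *e)
{

	e->count = 0;
}

/* Return a cache slot, evicting the least recently used entry if full. */
static entry_t *
cache_get_slot(cache_t *c)
{
	entry_t *e;

	if (c->nentries == CACHE_MAX) {
		e = c->lru;
		cache_unlink(c, e);
		merge_into_global(e);
	} else {
		e = calloc(1, sizeof(entry_t));
		if (e == NULL)
			return NULL;
		c->nentries++;
	}
	cache_link_mru(c, e);	/* new/reused slot becomes most recent */
	return e;
}

int
main(void)
{
	cache_t c = {0, NULL, NULL};
	size_t i;

	/* Insert past the cap; eviction keeps residency bounded. */
	for (i = 0; i < CACHE_MAX + 100; i++)
		(void)cache_get_slot(&c);
	printf("entries resident: %zu (cap %d)\n", c.nentries, CACHE_MAX);
	return 0;
}

In prof.c the entries are additionally keyed by backtrace in a ckh hash
(bt2cnt) alongside the LRU links; the sketch omits the hash so that the
eviction logic itself stays visible.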
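On the commit message's tuning advice: with the cap no longer adjustable,
the remaining lever against flush-induced lock contention is sampling
frequency, i.e. opt.lg_prof_sample, still handled by
CONF_HANDLE_SSIZE_T(lg_prof_sample, ...) in the jemalloc.c hunk above.  A
minimal sketch of setting it through the malloc_conf symbol that the
jemalloc.xml.in examples use; the value 19 is only an example, not a
recommendation:

/*
 * lg_prof_sample is the log base 2 of the average number of bytes between
 * samples; 19 means roughly one sampled allocation per 512 KiB, so far
 * fewer unique backtraces enter each per thread cache and PROF_TCMAX
 * flushes become rare.  (Illustrative value; tune for the workload.)
 */
const char *malloc_conf = "prof:true,lg_prof_sample:19";

The same option string can be supplied at run time via the MALLOC_CONF
environment variable instead of being compiled in.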