Remove the opt.lg_prof_tcmax option.

Remove the opt.lg_prof_tcmax option and hard-code a cache size of 1024.
This setting is something that users just shouldn't have to worry about.
If lock contention actually ends up being a problem, the simple solution
available to the user is to reduce sampling frequency.
Author: Jason Evans
Date:   2012-02-13 18:04:26 -08:00
Parent: e7a1058aaa
Commit: 0b526ff94d

6 changed files with 8 additions and 50 deletions
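
The hunks below test against PROF_TCMAX, whose definition (in prof.h) is not among the hunks shown here; based on the commit message it is presumably just a fixed constant along these lines:

/*
 * Assumed header-side counterpart of this change (the prof.h hunk is not
 * reproduced here): the per-thread backtrace cache is capped at a fixed
 * 1024 entries, replacing the runtime opt_lg_prof_tcmax limit.
 */
#define PROF_TCMAX 1024

With the option gone, prof_lookup() compares ckh_count(&prof_tdata->bt2cnt) directly against this constant, as the prof.c hunks below show.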

src/ctl.c

@@ -80,7 +80,6 @@ CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
-CTL_PROTO(opt_lg_prof_tcmax)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
@@ -222,8 +221,7 @@ static const ctl_node_t opt_node[] = {
     {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
     {NAME("prof_gdump"), CTL(opt_prof_gdump)},
     {NAME("prof_leak"), CTL(opt_prof_leak)},
-    {NAME("prof_accum"), CTL(opt_prof_accum)},
-    {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
+    {NAME("prof_accum"), CTL(opt_prof_accum)}
 };
 
 static const ctl_node_t arenas_bin_i_node[] = {
@@ -1133,7 +1131,6 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
 
 /******************************************************************************/

src/jemalloc.c

@@ -603,8 +603,6 @@ malloc_conf_init(void)
             CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
                 (sizeof(uint64_t) << 3) - 1)
             CONF_HANDLE_BOOL(prof_accum)
-            CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
-                (sizeof(size_t) << 3) - 1)
             CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
                 (sizeof(uint64_t) << 3) - 1)
             CONF_HANDLE_BOOL(prof_gdump)

src/prof.c

@@ -22,7 +22,6 @@ ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool opt_prof_gdump = false;
 bool opt_prof_leak = false;
 bool opt_prof_accum = true;
-ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
 char opt_prof_prefix[PATH_MAX + 1];
 
 uint64_t prof_interval;
@@ -519,8 +518,7 @@ prof_lookup(prof_bt_t *bt)
         prof_leave();
 
         /* Link a prof_thd_cnt_t into ctx for this thread. */
-        if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt)
-            == (ZU(1) << opt_lg_prof_tcmax)) {
+        if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
             assert(ckh_count(&prof_tdata->bt2cnt) > 0);
             /*
              * Flush the least recently used cnt in order to keep
@@ -535,9 +533,7 @@ prof_lookup(prof_bt_t *bt)
             prof_ctx_merge(ret.p->ctx, ret.p);
             /* ret can now be re-used. */
         } else {
-            assert(opt_lg_prof_tcmax < 0 ||
-                ckh_count(&prof_tdata->bt2cnt) < (ZU(1) <<
-                opt_lg_prof_tcmax));
+            assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
             /* Allocate and partially initialize a new cnt. */
             ret.v = imalloc(sizeof(prof_thr_cnt_t));
             if (ret.p == NULL) {

src/stats.c

@@ -515,7 +515,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
         OPT_WRITE_BOOL(prof_active)
         OPT_WRITE_SSIZE_T(lg_prof_sample)
         OPT_WRITE_BOOL(prof_accum)
-        OPT_WRITE_SSIZE_T(lg_prof_tcmax)
         OPT_WRITE_SSIZE_T(lg_prof_interval)
         OPT_WRITE_BOOL(prof_gdump)
         OPT_WRITE_BOOL(prof_leak)
@@ -622,17 +621,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
             write_cb(cbopaque, u2s((1U << sv), 10, s));
             write_cb(cbopaque, "\n");
 
-            CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
-            write_cb(cbopaque,
-                "Maximum per thread backtrace cache: ");
-            if (ssv >= 0) {
-                write_cb(cbopaque, u2s((1U << ssv), 10, s));
-                write_cb(cbopaque, " (2^");
-                write_cb(cbopaque, u2s(ssv, 10, s));
-                write_cb(cbopaque, ")\n");
-            } else
-                write_cb(cbopaque, "N/A\n");
-
             CTL_GET("opt.lg_prof_sample", &sv, size_t);
             write_cb(cbopaque, "Average profile sample interval: ");
             write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));