Implement opt.stats_interval and the _opts options.

Add options stats_interval and stats_interval_opts to allow interval-based stats
printing.  This provides an easy way to collect stats without code changes,
because opt.stats_print (which prints at process exit) may never fire for
binaries that do not exit.
This commit is contained in:
Qi Wang
2020-01-13 22:29:17 -08:00
committed by Qi Wang
parent d71a145ec1
commit 88b0e03a4e
14 changed files with 334 additions and 50 deletions

View File

@@ -96,6 +96,8 @@ CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_stats_interval)
CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
@@ -329,6 +331,8 @@ static const ctl_named_node_t opt_node[] = {
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
{NAME("stats_interval"), CTL(opt_stats_interval)},
{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)},
@@ -1791,6 +1795,8 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)

View File

@@ -775,8 +775,8 @@ malloc_ncpus(void) {
}
static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
size_t opts_len = strlen(opt_stats_print_opts);
init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
size_t opts_len = strlen(dest);
assert(opts_len <= stats_print_tot_num_options);
for (size_t i = 0; i < vlen; i++) {
@@ -787,16 +787,16 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
default: continue;
}
if (strchr(opt_stats_print_opts, v[i]) != NULL) {
if (strchr(dest, v[i]) != NULL) {
/* Ignore repeated. */
continue;
}
opt_stats_print_opts[opts_len++] = v[i];
opt_stats_print_opts[opts_len] = '\0';
dest[opts_len++] = v[i];
dest[opts_len] = '\0';
assert(opts_len <= stats_print_tot_num_options);
}
assert(opts_len == strlen(opt_stats_print_opts));
assert(opts_len == strlen(dest));
}
/* Reads the next size pair in a multi-sized option. */
@@ -1118,39 +1118,47 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_CHECK_MIN(um, min) ((um) < (min))
#define CONF_DONT_CHECK_MAX(um, max) false
#define CONF_CHECK_MAX(um, max) ((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
max_t mv; \
char *end; \
\
set_errno(0); \
um = malloc_strtoumax(v, &end, 0); \
mv = (max_t)malloc_strtoumax(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} else if (clip) { \
if (check_min(um, (t)(min))) { \
if (check_min(mv, (t)(min))) { \
o = (t)(min); \
} else if ( \
check_max(um, (t)(max))) { \
check_max(mv, (t)(max))) { \
o = (t)(max); \
} else { \
o = (t)um; \
o = (t)mv; \
} \
} else { \
if (check_min(um, (t)(min)) || \
check_max(um, (t)(max))) { \
if (check_min(mv, (t)(min)) || \
check_max(mv, (t)(max))) { \
CONF_ERROR( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else { \
o = (t)um; \
o = (t)mv; \
} \
} \
CONF_CONTINUE; \
}
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
check_max, clip)
#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
check_max, clip)
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
@@ -1158,27 +1166,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
set_errno(0); \
l = strtol(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} else if (l < (ssize_t)(min) || l > \
(ssize_t)(max)) { \
CONF_ERROR( \
"Out-of-range conf value", \
k, klen, v, vlen); \
} else { \
o = l; \
} \
CONF_CONTINUE; \
}
CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
CONF_CHECK_MIN, CONF_CHECK_MAX, false)
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
@@ -1275,7 +1268,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
SSIZE_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (CONF_MATCH("stats_print_opts")) {
init_opt_stats_print_opts(v, vlen);
init_opt_stats_opts(v, vlen,
opt_stats_print_opts);
CONF_CONTINUE;
}
CONF_HANDLE_INT64_T(opt_stats_interval,
"stats_interval", -1, INT64_MAX,
CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
if (CONF_MATCH("stats_interval_opts")) {
init_opt_stats_opts(v, vlen,
opt_stats_interval_opts);
CONF_CONTINUE;
}
if (config_fill) {
@@ -1463,7 +1465,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#undef CONF_CHECK_MIN
#undef CONF_DONT_CHECK_MAX
#undef CONF_CHECK_MAX
#undef CONF_HANDLE_T
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_T_SIGNED
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
@@ -1545,7 +1549,6 @@ malloc_init_hard_a0_locked() {
prof_boot0();
}
malloc_conf_init(&sc_data, bin_shard_sizes);
thread_event_boot();
sz_boot(&sc_data);
bin_info_boot(&sc_data, bin_shard_sizes);
@@ -1558,6 +1561,10 @@ malloc_init_hard_a0_locked() {
}
}
}
if (stats_boot()) {
return true;
}
if (pages_boot()) {
return true;
}
@@ -1573,6 +1580,7 @@ malloc_init_hard_a0_locked() {
if (config_prof) {
prof_boot1();
}
thread_event_boot();
arena_boot(&sc_data);
if (tcache_boot(TSDN_NULL)) {
return true;

View File

@@ -571,7 +571,10 @@ void
prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
cassert(config_prof);
return counter_rollback(tsdn, &prof_idump_accumulated, usize);
/* Rollback is only done on arena_prof_promote of small sizes. */
assert(SC_LARGE_MINCLASS > usize);
return counter_rollback(tsdn, &prof_idump_accumulated,
SC_LARGE_MINCLASS - usize);
}
bool

View File

@@ -50,6 +50,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
bool opt_stats_print = false;
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
static counter_accum_t stats_interval_accumulated;
/* Per thread batch accum size for stats_interval. */
static uint64_t stats_interval_accum_batch;
/******************************************************************************/
static uint64_t
@@ -1000,14 +1007,16 @@ stats_general_print(emitter_t *emitter) {
unsigned uv;
uint32_t u32v;
uint64_t u64v;
int64_t i64v;
ssize_t ssv, ssv2;
size_t sv, bsz, usz, ssz, sssz, cpsz;
size_t sv, bsz, usz, i64sz, ssz, sssz, cpsz;
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
i64sz = sizeof(int64_t);
CTL_GET("version", &cpv, const char *);
emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
@@ -1063,6 +1072,9 @@ stats_general_print(emitter_t *emitter) {
#define OPT_WRITE_UNSIGNED(name) \
OPT_WRITE(name, uv, usz, emitter_type_unsigned)
#define OPT_WRITE_INT64(name) \
OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
#define OPT_WRITE_SIZE_T(name) \
OPT_WRITE(name, sv, ssz, emitter_type_size)
#define OPT_WRITE_SSIZE_T(name) \
@@ -1109,6 +1121,10 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("prof_leak")
OPT_WRITE_BOOL("stats_print")
OPT_WRITE_CHAR_P("stats_print_opts")
OPT_WRITE_BOOL("stats_print")
OPT_WRITE_CHAR_P("stats_print_opts")
OPT_WRITE_INT64("stats_interval")
OPT_WRITE_CHAR_P("stats_interval_opts")
OPT_WRITE_CHAR_P("zero_realloc")
emitter_dict_end(emitter);
@@ -1477,3 +1493,37 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
emitter_end(&emitter);
}
/*
 * Fold `bytes` (allocation activity since the caller's last stats event) into
 * the global stats_interval counter.  Propagates counter_accum's return
 * value; per the caller in thread_event.c, true means the configured interval
 * has been reached and stats should be printed.
 */
bool
stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
	tsdn_t *tsdn = tsd_tsdn(tsd);
	return counter_accum(tsdn, &stats_interval_accumulated, bytes);
}
/*
 * Accessor for the per-thread batch size used to amortize stats_interval
 * accumulation; computed once in stats_boot().
 */
uint64_t
stats_interval_accum_batch_size(void) {
	return stats_interval_accum_batch;
}
bool
stats_boot(void) {
uint64_t stats_interval;
if (opt_stats_interval < 0) {
assert(opt_stats_interval == -1);
stats_interval = 0;
stats_interval_accum_batch = 0;
} else{
/* See comments in stats.h */
stats_interval = (opt_stats_interval > 0) ?
opt_stats_interval : 1;
uint64_t batch = stats_interval >>
STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
} else if (batch == 0) {
batch = 1;
}
stats_interval_accum_batch = batch;
}
return counter_accum_init(&stats_interval_accumulated, stats_interval);
}

View File

@@ -25,6 +25,7 @@ static void thread_##event##_event_handler(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E
/* (Re)Init functions. */
static void
tsd_thread_tcache_gc_event_init(tsd_t *tsd) {
assert(TCACHE_GC_INCR_BYTES > 0);
@@ -37,11 +38,19 @@ tsd_thread_prof_sample_event_init(tsd_t *tsd) {
prof_sample_threshold_update(tsd);
}
/*
 * (Re)arm this thread's stats_interval event with the globally computed
 * per-thread batch size as the wait distance.
 */
static void
tsd_thread_stats_interval_event_init(tsd_t *tsd) {
	assert(opt_stats_interval >= 0);
	thread_stats_interval_event_update(tsd,
	    stats_interval_accum_batch_size());
}
/* Handler functions. */
static void
thread_tcache_gc_event_handler(tsd_t *tsd) {
assert(TCACHE_GC_INCR_BYTES > 0);
assert(tcache_gc_event_wait_get(tsd) == 0U);
thread_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
tsd_thread_tcache_gc_event_init(tsd);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_event_hard(tsd, tcache);
@@ -71,6 +80,21 @@ thread_prof_sample_event_handler(tsd_t *tsd) {
}
}
/*
 * stats_interval event handler: fold the bytes allocated since this thread's
 * last stats event into the global accumulator, print stats if the configured
 * interval was crossed, then re-arm the event.
 */
static void
thread_stats_interval_event_handler(tsd_t *tsd) {
	assert(opt_stats_interval >= 0);
	assert(stats_interval_event_wait_get(tsd) == 0U);

	uint64_t allocated = thread_allocated_last_event_get(tsd);
	uint64_t prev = stats_interval_last_event_get(tsd);
	stats_interval_last_event_set(tsd, allocated);

	uint64_t delta = allocated - prev;
	if (stats_interval_accum(tsd, delta)) {
		/* Interval reached: emit stats with the configured opts. */
		je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
	}
	tsd_thread_stats_interval_event_init(tsd);
}
/* Per event facilities done. */
static uint64_t
thread_allocated_next_event_compute(tsd_t *tsd) {
uint64_t wait = THREAD_EVENT_MAX_START_WAIT;