Implement opt.stats_interval and the _opts options.
Add options stats_interval and stats_interval_opts to allow interval-based stats printing. This provides an easy way to collect stats without code changes, because opt.stats_print only fires at exit and thus may never trigger (some binaries never exit).
commit 88b0e03a4e (parent d71a145ec1)
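With this change, periodic stats collection can be enabled purely through configuration, e.g. (illustrative values and binary name):

MALLOC_CONF="stats_interval:1073741824,stats_interval_opts:J" ./long_running_binary

which emits JSON-formatted stats roughly once per GiB of allocation activity, with no code changes or process exit required.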
Makefile.in:

@@ -191,6 +191,7 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/buf_writer.c \
 	$(srcroot)test/unit/cache_bin.c \
 	$(srcroot)test/unit/ckh.c \
+	$(srcroot)test/unit/counter.c \
 	$(srcroot)test/unit/decay.c \
 	$(srcroot)test/unit/div.c \
 	$(srcroot)test/unit/emitter.c \
doc/jemalloc.xml.in:

@@ -1185,6 +1185,41 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
         enabled.  The default is <quote></quote>.</para></listitem>
       </varlistentry>

+      <varlistentry id="opt.stats_interval">
+        <term>
+          <mallctl>opt.stats_interval</mallctl>
+          (<type>int64_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Average interval between statistics outputs, as measured
+        in bytes of allocation activity.  The actual interval may be sporadic
+        because decentralized event counters are used to avoid synchronization
+        bottlenecks.  The output may be triggered on any thread, which then
+        calls <function>malloc_stats_print()</function>.
+        <link
+        linkend="opt.stats_interval_opts"><mallctl>opt.stats_interval_opts</mallctl></link>
+        can be combined to specify output options.  By default,
+        interval-triggered stats output is disabled (encoded as
+        -1).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.stats_interval_opts">
+        <term>
+          <mallctl>opt.stats_interval_opts</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Options (the <parameter>opts</parameter> string) to pass
+        to <function>malloc_stats_print()</function> for interval-based
+        statistics printing (enabled through
+        <link
+        linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link>).
+        See available options in
+        <link
+        linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
+        Has no effect unless
+        <link
+        linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link>
+        is enabled.  The default is <quote></quote>.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.junk">
         <term>
           <mallctl>opt.junk</mallctl>
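Since both new options are readable via mallctl (as documented above), a consumer can verify the configured interval at runtime. A minimal sketch:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	int64_t interval;
	size_t sz = sizeof(interval);
	/* Read-only option; -1 (the default) means interval output is disabled. */
	if (mallctl("opt.stats_interval", &interval, &sz, NULL, 0) == 0) {
		printf("opt.stats_interval: %" PRId64 "\n", interval);
	}
	return 0;
}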
include/jemalloc/internal/counter.h:

@@ -6,11 +6,11 @@
 typedef struct counter_accum_s {
 #ifndef JEMALLOC_ATOMIC_U64
 	malloc_mutex_t mtx;
 	uint64_t accumbytes;
 #else
 	atomic_u64_t accumbytes;
 #endif
 	uint64_t interval;
 } counter_accum_t;

 JEMALLOC_ALWAYS_INLINE bool
@@ -52,7 +52,7 @@ counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
 }

 JEMALLOC_ALWAYS_INLINE void
-counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, size_t usize) {
+counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
 	/*
 	 * Cancel out as much of the excessive accumbytes increase as possible
 	 * without underflowing. Interval-triggered events occur slightly more
@@ -63,16 +63,14 @@ counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, size_t usize) {
 		a0 = atomic_load_u64(&counter->accumbytes,
 		    ATOMIC_RELAXED);
 		do {
-			a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-			    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+			a1 = (a0 >= bytes) ? a0 - bytes : 0;
 		} while (!atomic_compare_exchange_weak_u64(
 		    &counter->accumbytes, &a0, a1, ATOMIC_RELAXED,
 		    ATOMIC_RELAXED));
 #else
 		malloc_mutex_lock(tsdn, &counter->mtx);
 		a0 = counter->accumbytes;
-		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+		a1 = (a0 >= bytes) ? a0 - bytes : 0;
 		counter->accumbytes = a1;
 		malloc_mutex_unlock(tsdn, &counter->mtx);
 #endif
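The clamped subtraction is what makes the rollback safe to over-estimate. A concrete illustration (hypothetical sequence; the new test/unit/counter.c below exercises the same behavior):

	counter_accum_t c;
	counter_accum_init(&c, interval);	/* interval much larger than 10 */
	counter_accum(tsdn, &c, 10);		/* accumbytes == 10 */
	counter_rollback(tsdn, &c, 4);		/* accumbytes == 6 */
	counter_rollback(tsdn, &c, 100);	/* clamped: accumbytes == 0, no underflow */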
include/jemalloc/internal/emitter.h:

@@ -22,6 +22,7 @@ typedef enum emitter_type_e emitter_type_t;
 enum emitter_type_e {
 	emitter_type_bool,
 	emitter_type_int,
+	emitter_type_int64,
 	emitter_type_unsigned,
 	emitter_type_uint32,
 	emitter_type_uint64,
@@ -149,6 +150,9 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
 	case emitter_type_int:
 		EMIT_SIMPLE(int, "%d")
 		break;
+	case emitter_type_int64:
+		EMIT_SIMPLE(int64_t, "%" FMTd64)
+		break;
 	case emitter_type_unsigned:
 		EMIT_SIMPLE(unsigned, "%u")
 		break;
include/jemalloc/internal/stats.h:

@@ -24,8 +24,26 @@ enum {
 extern bool opt_stats_print;
 extern char opt_stats_print_opts[stats_print_tot_num_options+1];

+/* Utilities for stats_interval. */
+extern int64_t opt_stats_interval;
+extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
+
+#define STATS_INTERVAL_DEFAULT -1
+/*
+ * Batch-increment the counter to reduce synchronization overhead.  Each thread
+ * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; the batch is
+ * also capped at BATCH_MAX to preserve accuracy when the interval is huge
+ * (which is expected).
+ */
+#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
+#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
+
+uint64_t stats_interval_accum_batch_size(void);
+bool stats_interval_accum(tsd_t *tsd, uint64_t bytes);
+
 /* Implements je_malloc_stats_print. */
 void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *opts);

+bool stats_boot(void);
+
 #endif /* JEMALLOC_INTERNAL_STATS_H */
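To make the two constants concrete (a worked example; the clamping itself lands in stats_boot() in src/stats.c below):

	/* opt_stats_interval = 1 GiB: */
	uint64_t batch = ((uint64_t)1 << 30) >> STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
	/* batch == 16 MiB, which exceeds STATS_INTERVAL_ACCUM_BATCH_MAX (4 MiB),
	 * so it is clamped to 4 MiB; each thread then merges into the shared
	 * counter only once per ~4 MiB of local allocation activity. */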
include/jemalloc/internal/thread_event.h:

@@ -36,7 +36,8 @@ void tsd_thread_event_init(tsd_t *tsd);
 */
 #define ITERATE_OVER_ALL_EVENTS \
 	E(tcache_gc, (TCACHE_GC_INCR_BYTES > 0)) \
-	E(prof_sample, (config_prof && opt_prof))
+	E(prof_sample, (config_prof && opt_prof)) \
+	E(stats_interval, (opt_stats_interval >= 0))

 #define E(event, condition) \
 	C(event##_event_wait)
@@ -46,7 +47,8 @@ void tsd_thread_event_init(tsd_t *tsd);
 	C(thread_allocated) \
 	C(thread_allocated_last_event) \
 	ITERATE_OVER_ALL_EVENTS \
-	C(prof_sample_last_event)
+	C(prof_sample_last_event) \
+	C(stats_interval_last_event)

 /* Getters directly wrap TSD getters. */
 #define C(counter) \
include/jemalloc/internal/tsd.h:

@@ -87,6 +87,8 @@ typedef void (*test_callback_t)(int *);
 	O(tcache_gc_event_wait, uint64_t, uint64_t) \
 	O(prof_sample_event_wait, uint64_t, uint64_t) \
 	O(prof_sample_last_event, uint64_t, uint64_t) \
+	O(stats_interval_event_wait, uint64_t, uint64_t) \
+	O(stats_interval_last_event, uint64_t, uint64_t) \
 	O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
 	O(prng_state, uint64_t, uint64_t) \
 	O(iarena, arena_t *, arena_t *) \
@@ -118,6 +120,8 @@ typedef void (*test_callback_t)(int *);
 	/* tcache_gc_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
 	/* prof_sample_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
 	/* prof_sample_last_event */ 0, \
+	/* stats_interval_event_wait */ THREAD_EVENT_MIN_START_WAIT, \
+	/* stats_interval_last_event */ 0, \
 	/* prof_tdata */ NULL, \
 	/* prng_state */ 0, \
 	/* iarena */ NULL, \
src/ctl.c:

@@ -96,6 +96,8 @@ CTL_PROTO(opt_dirty_decay_ms)
 CTL_PROTO(opt_muzzy_decay_ms)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_interval)
+CTL_PROTO(opt_stats_interval_opts)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
 CTL_PROTO(opt_utrace)
@@ -329,6 +331,8 @@ static const ctl_named_node_t opt_node[] = {
 	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
 	{NAME("stats_print"), CTL(opt_stats_print)},
 	{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+	{NAME("stats_interval"), CTL(opt_stats_interval)},
+	{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
 	{NAME("junk"), CTL(opt_junk)},
 	{NAME("zero"), CTL(opt_zero)},
 	{NAME("utrace"), CTL(opt_utrace)},
@@ -1791,6 +1795,8 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
 CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
 CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
+CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
 CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
src/jemalloc.c:

@@ -775,8 +775,8 @@ malloc_ncpus(void) {
 }

 static void
-init_opt_stats_print_opts(const char *v, size_t vlen) {
-	size_t opts_len = strlen(opt_stats_print_opts);
+init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
+	size_t opts_len = strlen(dest);
 	assert(opts_len <= stats_print_tot_num_options);

 	for (size_t i = 0; i < vlen; i++) {
@@ -787,16 +787,16 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
 		default: continue;
 		}

-		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+		if (strchr(dest, v[i]) != NULL) {
 			/* Ignore repeated. */
 			continue;
 		}

-		opt_stats_print_opts[opts_len++] = v[i];
-		opt_stats_print_opts[opts_len] = '\0';
+		dest[opts_len++] = v[i];
+		dest[opts_len] = '\0';
 		assert(opts_len <= stats_print_tot_num_options);
 	}
-	assert(opts_len == strlen(opt_stats_print_opts));
+	assert(opts_len == strlen(dest));
 }

 /* Reads the next size pair in a multi-sized option. */
@@ -1118,39 +1118,47 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 #define CONF_CHECK_MIN(um, min) ((um) < (min))
 #define CONF_DONT_CHECK_MAX(um, max) false
 #define CONF_CHECK_MAX(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+
+#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
 	if (CONF_MATCH(n)) { \
-		uintmax_t um; \
+		max_t mv; \
 		char *end; \
 		\
 		set_errno(0); \
-		um = malloc_strtoumax(v, &end, 0); \
+		mv = (max_t)malloc_strtoumax(v, &end, 0); \
 		if (get_errno() != 0 || (uintptr_t)end -\
 		    (uintptr_t)v != vlen) { \
 			CONF_ERROR("Invalid conf value",\
 			    k, klen, v, vlen); \
 		} else if (clip) { \
-			if (check_min(um, (t)(min))) { \
+			if (check_min(mv, (t)(min))) { \
 				o = (t)(min); \
 			} else if ( \
-			    check_max(um, (t)(max))) { \
+			    check_max(mv, (t)(max))) { \
 				o = (t)(max); \
 			} else { \
-				o = (t)um; \
+				o = (t)mv; \
 			} \
 		} else { \
-			if (check_min(um, (t)(min)) || \
-			    check_max(um, (t)(max))) { \
+			if (check_min(mv, (t)(min)) || \
+			    check_max(mv, (t)(max))) { \
 				CONF_ERROR( \
 				    "Out-of-range " \
 				    "conf value", \
 				    k, klen, v, vlen); \
 			} else { \
-				o = (t)um; \
+				o = (t)mv; \
 			} \
 		} \
 		CONF_CONTINUE; \
 	}
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+	CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
+	    check_max, clip)
+#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
+	CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
+	    check_max, clip)

 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
     clip) \
 	CONF_HANDLE_T_U(unsigned, o, n, min, max, \
@@ -1158,27 +1166,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
 	CONF_HANDLE_T_U(size_t, o, n, min, max, \
 	    check_min, check_max, clip)
+#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
+	CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
+	    check_min, check_max, clip)
 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
-	if (CONF_MATCH(n)) { \
-		long l; \
-		char *end; \
-		\
-		set_errno(0); \
-		l = strtol(v, &end, 0); \
-		if (get_errno() != 0 || (uintptr_t)end -\
-		    (uintptr_t)v != vlen) { \
-			CONF_ERROR("Invalid conf value",\
-			    k, klen, v, vlen); \
-		} else if (l < (ssize_t)(min) || l > \
-		    (ssize_t)(max)) { \
-			CONF_ERROR( \
-			    "Out-of-range conf value", \
-			    k, klen, v, vlen); \
-		} else { \
-			o = l; \
-		} \
-		CONF_CONTINUE; \
-	}
+	CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
+	    CONF_CHECK_MIN, CONF_CHECK_MAX, false)
 #define CONF_HANDLE_CHAR_P(o, n, d) \
 	if (CONF_MATCH(n)) { \
 		size_t cpylen = (vlen <= \
@@ -1275,7 +1268,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			    SSIZE_MAX);
 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
 			if (CONF_MATCH("stats_print_opts")) {
-				init_opt_stats_print_opts(v, vlen);
+				init_opt_stats_opts(v, vlen,
+				    opt_stats_print_opts);
 				CONF_CONTINUE;
 			}
+			CONF_HANDLE_INT64_T(opt_stats_interval,
+			    "stats_interval", -1, INT64_MAX,
+			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+			if (CONF_MATCH("stats_interval_opts")) {
+				init_opt_stats_opts(v, vlen,
+				    opt_stats_interval_opts);
+				CONF_CONTINUE;
+			}
 			if (config_fill) {
@@ -1463,7 +1465,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 #undef CONF_CHECK_MIN
 #undef CONF_DONT_CHECK_MAX
 #undef CONF_CHECK_MAX
+#undef CONF_HANDLE_T
 #undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_T_SIGNED
 #undef CONF_HANDLE_UNSIGNED
 #undef CONF_HANDLE_SIZE_T
 #undef CONF_HANDLE_SSIZE_T
@@ -1545,7 +1549,6 @@ malloc_init_hard_a0_locked() {
 		prof_boot0();
 	}
 	malloc_conf_init(&sc_data, bin_shard_sizes);
-	thread_event_boot();
 	sz_boot(&sc_data);
 	bin_info_boot(&sc_data, bin_shard_sizes);
@@ -1558,6 +1561,10 @@ malloc_init_hard_a0_locked() {
 			}
 		}
 	}
+	if (stats_boot()) {
+		return true;
+	}
 	if (pages_boot()) {
 		return true;
 	}
@@ -1573,6 +1580,7 @@ malloc_init_hard_a0_locked() {
 	if (config_prof) {
 		prof_boot1();
 	}
+	thread_event_boot();
 	arena_boot(&sc_data);
 	if (tcache_boot(TSDN_NULL)) {
 		return true;
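The CONF_HANDLE_INT64_T invocation above uses CONF_CHECK_MIN against -1 with no max check and no clipping, so for example (illustrative conf strings):

MALLOC_CONF="stats_interval:-1"       # accepted: keeps interval output disabled
MALLOC_CONF="stats_interval:1000000"  # accepted: ~every 1 MB of allocations
MALLOC_CONF="stats_interval:-2"       # rejected: "Out-of-range conf value"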
src/prof.c:

@@ -571,7 +571,10 @@ void
 prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
 	cassert(config_prof);

-	return counter_rollback(tsdn, &prof_idump_accumulated, usize);
+	/* Rollback is only done on arena_prof_promote of small sizes. */
+	assert(SC_LARGE_MINCLASS > usize);
+	return counter_rollback(tsdn, &prof_idump_accumulated,
+	    SC_LARGE_MINCLASS - usize);
 }

 bool
src/stats.c (52 lines changed):

@@ -50,6 +50,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
 bool opt_stats_print = false;
 char opt_stats_print_opts[stats_print_tot_num_options+1] = "";

+int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
+char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
+
+static counter_accum_t stats_interval_accumulated;
+/* Per-thread batch accum size for stats_interval. */
+static uint64_t stats_interval_accum_batch;
+
 /******************************************************************************/

 static uint64_t
@@ -1000,14 +1007,16 @@ stats_general_print(emitter_t *emitter) {
 	unsigned uv;
 	uint32_t u32v;
 	uint64_t u64v;
+	int64_t i64v;
 	ssize_t ssv, ssv2;
-	size_t sv, bsz, usz, ssz, sssz, cpsz;
+	size_t sv, bsz, usz, i64sz, ssz, sssz, cpsz;

 	bsz = sizeof(bool);
 	usz = sizeof(unsigned);
 	ssz = sizeof(size_t);
 	sssz = sizeof(ssize_t);
 	cpsz = sizeof(const char *);
+	i64sz = sizeof(int64_t);

 	CTL_GET("version", &cpv, const char *);
 	emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
@@ -1063,6 +1072,9 @@ stats_general_print(emitter_t *emitter) {
 #define OPT_WRITE_UNSIGNED(name) \
 	OPT_WRITE(name, uv, usz, emitter_type_unsigned)

+#define OPT_WRITE_INT64(name) \
+	OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
+
 #define OPT_WRITE_SIZE_T(name) \
 	OPT_WRITE(name, sv, ssz, emitter_type_size)
 #define OPT_WRITE_SSIZE_T(name) \
@@ -1109,6 +1121,10 @@ stats_general_print(emitter_t *emitter) {
 	OPT_WRITE_BOOL("prof_leak")
 	OPT_WRITE_BOOL("stats_print")
 	OPT_WRITE_CHAR_P("stats_print_opts")
+	OPT_WRITE_INT64("stats_interval")
+	OPT_WRITE_CHAR_P("stats_interval_opts")
 	OPT_WRITE_CHAR_P("zero_realloc")

 	emitter_dict_end(emitter);
@@ -1477,3 +1493,37 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
 	emitter_end(&emitter);
 }
+
+bool
+stats_interval_accum(tsd_t *tsd, uint64_t bytes) {
+	return counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated, bytes);
+}
+
+uint64_t
+stats_interval_accum_batch_size(void) {
+	return stats_interval_accum_batch;
+}
+
+bool
+stats_boot(void) {
+	uint64_t stats_interval;
+	if (opt_stats_interval < 0) {
+		assert(opt_stats_interval == -1);
+		stats_interval = 0;
+		stats_interval_accum_batch = 0;
+	} else {
+		/* See comments in stats.h */
+		stats_interval = (opt_stats_interval > 0) ?
+		    opt_stats_interval : 1;
+		uint64_t batch = stats_interval >>
+		    STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
+		if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
+			batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
+		} else if (batch == 0) {
+			batch = 1;
+		}
+		stats_interval_accum_batch = batch;
+	}
+
+	return counter_accum_init(&stats_interval_accumulated, stats_interval);
+}
src/thread_event.c:

@@ -25,6 +25,7 @@ static void thread_##event##_event_handler(tsd_t *tsd);
 ITERATE_OVER_ALL_EVENTS
 #undef E

+/* (Re)Init functions. */
 static void
 tsd_thread_tcache_gc_event_init(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
@@ -37,11 +38,19 @@ tsd_thread_prof_sample_event_init(tsd_t *tsd) {
 	prof_sample_threshold_update(tsd);
 }

+static void
+tsd_thread_stats_interval_event_init(tsd_t *tsd) {
+	assert(opt_stats_interval >= 0);
+	uint64_t interval = stats_interval_accum_batch_size();
+	thread_stats_interval_event_update(tsd, interval);
+}
+
+/* Handler functions. */
 static void
 thread_tcache_gc_event_handler(tsd_t *tsd) {
 	assert(TCACHE_GC_INCR_BYTES > 0);
 	assert(tcache_gc_event_wait_get(tsd) == 0U);
-	thread_tcache_gc_event_update(tsd, TCACHE_GC_INCR_BYTES);
+	tsd_thread_tcache_gc_event_init(tsd);
 	tcache_t *tcache = tcache_get(tsd);
 	if (tcache != NULL) {
 		tcache_event_hard(tsd, tcache);
@@ -71,6 +80,21 @@ thread_prof_sample_event_handler(tsd_t *tsd) {
 	}
 }

+static void
+thread_stats_interval_event_handler(tsd_t *tsd) {
+	assert(opt_stats_interval >= 0);
+	assert(stats_interval_event_wait_get(tsd) == 0U);
+	uint64_t last_event = thread_allocated_last_event_get(tsd);
+	uint64_t last_stats_event = stats_interval_last_event_get(tsd);
+	stats_interval_last_event_set(tsd, last_event);
+
+	if (stats_interval_accum(tsd, last_event - last_stats_event)) {
+		je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
+	}
+	tsd_thread_stats_interval_event_init(tsd);
+}
 /* Per event facilities done. */

 static uint64_t
 thread_allocated_next_event_compute(tsd_t *tsd) {
 	uint64_t wait = THREAD_EVENT_MAX_START_WAIT;
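(Rough intuition, not something stated in the commit: because each thread merges into the shared counter only after a batch-sized chunk of local allocation, up to roughly nthreads × batch bytes can sit unmerged at any instant, so the realized interval can overshoot opt_stats_interval by about that much — e.g. 16 active threads at the 4 MiB batch cap give ~64 MiB of slack on a 1 GiB interval. This is the "sporadic" behavior the documentation above warns about.)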
test/unit/counter.c (new file, 128 lines):

@@ -0,0 +1,128 @@
#include "test/jemalloc_test.h"

static const uint64_t interval = 1 << 20;

TEST_BEGIN(test_counter_accum) {
	uint64_t increment = interval >> 4;
	unsigned n = interval / increment;
	uint64_t accum = 0;

	counter_accum_t c;
	counter_accum_init(&c, interval);

	tsd_t *tsd = tsd_fetch();
	bool trigger;
	for (unsigned i = 0; i < n; i++) {
		trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
		accum += increment;
		if (accum < interval) {
			assert_b_eq(trigger, false, "Should not trigger");
		} else {
			assert_b_eq(trigger, true, "Should have triggered");
		}
	}
	assert_b_eq(trigger, true, "Should have triggered");
}
TEST_END

void
assert_counter_value(counter_accum_t *c, uint64_t v) {
	uint64_t accum;
#ifdef JEMALLOC_ATOMIC_U64
	accum = atomic_load_u64(&(c->accumbytes), ATOMIC_RELAXED);
#else
	accum = c->accumbytes;
#endif
	assert_u64_eq(accum, v, "Counter value mismatch");
}

TEST_BEGIN(test_counter_rollback) {
	uint64_t half_interval = interval / 2;

	counter_accum_t c;
	counter_accum_init(&c, interval);

	tsd_t *tsd = tsd_fetch();
	counter_rollback(tsd_tsdn(tsd), &c, half_interval);

	bool trigger;
	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
	assert_b_eq(trigger, false, "Should not trigger");
	counter_rollback(tsd_tsdn(tsd), &c, half_interval + 1);
	assert_counter_value(&c, 0);

	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
	assert_b_eq(trigger, false, "Should not trigger");
	counter_rollback(tsd_tsdn(tsd), &c, half_interval - 1);
	assert_counter_value(&c, 1);

	counter_rollback(tsd_tsdn(tsd), &c, 1);
	assert_counter_value(&c, 0);

	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
	assert_b_eq(trigger, false, "Should not trigger");
	counter_rollback(tsd_tsdn(tsd), &c, 1);
	assert_counter_value(&c, half_interval - 1);

	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
	assert_b_eq(trigger, false, "Should not trigger");
	assert_counter_value(&c, interval - 1);

	trigger = counter_accum(tsd_tsdn(tsd), &c, 1);
	assert_b_eq(trigger, true, "Should have triggered");
	assert_counter_value(&c, 0);

	trigger = counter_accum(tsd_tsdn(tsd), &c, interval + 1);
	assert_b_eq(trigger, true, "Should have triggered");
	assert_counter_value(&c, 1);
}
TEST_END

#define N_THDS (16)
#define N_ITER_THD (1 << 12)
#define ITER_INCREMENT (interval >> 4)

static void *
thd_start(void *varg) {
	counter_accum_t *c = (counter_accum_t *)varg;

	tsd_t *tsd = tsd_fetch();
	bool trigger;
	uintptr_t n_triggered = 0;
	for (unsigned i = 0; i < N_ITER_THD; i++) {
		trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT);
		n_triggered += trigger ? 1 : 0;
	}
	return (void *)n_triggered;
}

TEST_BEGIN(test_counter_mt) {
	counter_accum_t shared_c;
	counter_accum_init(&shared_c, interval);

	thd_t thds[N_THDS];
	unsigned i;
	for (i = 0; i < N_THDS; i++) {
		thd_create(&thds[i], thd_start, (void *)&shared_c);
	}

	uint64_t sum = 0;
	for (i = 0; i < N_THDS; i++) {
		void *ret;
		thd_join(thds[i], &ret);
		sum += (uintptr_t)ret;
	}
	assert_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
	    "Incorrect number of triggers");
}
TEST_END

int
main(void) {
	return test(
	    test_counter_accum,
	    test_counter_rollback,
	    test_counter_mt);
}
test/unit/mallctl.c:

@@ -170,6 +170,9 @@ TEST_BEGIN(test_mallctl_opt) {
 	TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
 	TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
 	TEST_MALLCTL_OPT(bool, stats_print, always);
+	TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
+	TEST_MALLCTL_OPT(int64_t, stats_interval, always);
+	TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
 	TEST_MALLCTL_OPT(const char *, junk, fill);
 	TEST_MALLCTL_OPT(bool, zero, fill);
 	TEST_MALLCTL_OPT(bool, utrace, utrace);