2017-01-20 13:41:41 +08:00
|
|
|
#define JEMALLOC_STATS_C_
|
2017-04-11 09:17:55 +08:00
|
|
|
#include "jemalloc/internal/jemalloc_preamble.h"
|
|
|
|
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-04-12 05:43:12 +08:00
|
|
|
#include "jemalloc/internal/assert.h"
|
2017-04-25 08:09:56 +08:00
|
|
|
#include "jemalloc/internal/ctl.h"
|
2018-03-02 09:29:58 +08:00
|
|
|
#include "jemalloc/internal/emitter.h"
|
2017-05-24 03:28:19 +08:00
|
|
|
#include "jemalloc/internal/mutex.h"
|
2017-04-25 08:09:56 +08:00
|
|
|
#include "jemalloc/internal/mutex_prof.h"
|
2017-04-12 05:43:12 +08:00
|
|
|
|
2017-04-25 08:09:56 +08:00
|
|
|
/*
 * Human-readable names for the global mutexes, generated from the
 * MUTEX_PROF_GLOBAL_MUTEXES X-macro list (OP stringifies each entry).
 */
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
};
|
|
|
|
|
2017-04-25 08:09:56 +08:00
|
|
|
/*
 * Human-readable names for the per-arena mutexes, generated from the
 * MUTEX_PROF_ARENA_MUTEXES X-macro list (OP stringifies each entry).
 */
const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/* Read the mallctl value named n (of type t) into *v. */
#define CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, (void *)v, &sz, NULL, 0);				\
} while (0)
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/*
 * Like CTL_GET, but for names with one wildcard component: the name n
 * is translated to a MIB and index i is substituted at MIB position 2
 * (e.g. "stats.arenas.<i>.x").
 */
#define CTL_M2_GET(n, i, v, t) do {					\
	size_t mib[CTL_MAX_DEPTH];					\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
} while (0)
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/*
 * Like CTL_M2_GET, but with two wildcard components: indices i and j
 * are substituted at MIB positions 2 and 4 respectively
 * (e.g. "stats.arenas.<i>.bins.<j>.x").
 */
#define CTL_M2_M4_GET(n, i, j, v, t) do {				\
	size_t mib[CTL_MAX_DEPTH];					\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	mib[4] = (j);							\
	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
} while (0)
|
|
|
|
|
2010-01-17 01:53:50 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
/* Data. */
|
|
|
|
|
2017-05-28 06:35:36 +08:00
|
|
|
/* Whether stats printing is enabled; defaults to off. */
bool opt_stats_print = false;

/*
 * Option characters selecting which stats sections are printed; sized
 * for (presumably) one char per option plus the terminating '\0'.
 */
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
|
2010-01-17 01:53:50 +08:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2017-03-04 11:58:43 +08:00
|
|
|
/* Calculate x.yyy and output a string (takes a fixed sized char array). */
|
|
|
|
static bool
|
|
|
|
get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
|
|
|
|
if (divisor == 0 || dividend > divisor) {
|
|
|
|
/* The rate is not supposed to be greater than 1. */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (dividend > 0) {
|
|
|
|
assert(UINT64_MAX / dividend >= 1000);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned n = (unsigned)((dividend * 1000) / divisor);
|
|
|
|
if (n < 10) {
|
|
|
|
malloc_snprintf(str, 6, "0.00%u", n);
|
|
|
|
} else if (n < 100) {
|
|
|
|
malloc_snprintf(str, 6, "0.0%u", n);
|
|
|
|
} else if (n < 1000) {
|
|
|
|
malloc_snprintf(str, 6, "0.%u", n);
|
|
|
|
} else {
|
|
|
|
malloc_snprintf(str, 6, "1");
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-03-22 02:56:38 +08:00
|
|
|
/* Maximum buffer size for generated mutex-stats mallctl name strings. */
#define MUTEX_CTL_STR_MAX_LENGTH 128
/* Compose the mallctl name "stats.<prefix>.<mutex>.<counter>" into str. */
static void
gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
    const char *mutex, const char *counter) {
	malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
}
|
|
|
|
|
|
|
|
/*
 * Read all mutex profiling counters for bin bin_ind of arena arena_ind
 * through the mallctl interface.  The MUTEX_PROF_COUNTERS X-macro emits
 * one gen_mutex_ctl_str + CTL_M2_M4_GET pair per counter; the counter's
 * type t is token-pasted to select the matching results array, so
 * uint64_t and uint32_t counters land in separate output arrays.
 */
static void
read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind,
    uint64_t results_uint64_t[mutex_prof_num_uint64_t_counters],
    uint32_t results_uint32_t[mutex_prof_num_uint32_t_counters]) {
	char cmd[MUTEX_CTL_STR_MAX_LENGTH];
#define OP(c, t)							\
	gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH,		\
	    "arenas.0.bins.0","mutex", #c);				\
	CTL_M2_M4_GET(cmd, arena_ind, bin_ind,				\
	    (t *)&results_##t[mutex_counter_##c], t);
	MUTEX_PROF_COUNTERS
#undef OP
}
|
|
|
|
|
|
|
|
/*
 * Emit one mutex's profiling counters as a JSON object named `name`,
 * indented by json_indent.  `last` suppresses the trailing comma after
 * the closing brace.
 */
static void
mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *name, uint64_t stats_uint64_t[mutex_prof_num_uint64_t_counters],
    uint32_t stats_uint32_t[mutex_prof_num_uint32_t_counters],
    const char *json_indent, bool last) {
	malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name);

	/* Per-width emission counters, used to detect the final entry. */
	mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
	mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
	/* Indexed by sizeof(t)/sizeof(uint32_t) - 1: [0] -> u32, [1] -> u64. */
	char *fmt_str[2] = {"%s\t\"%s\": %"FMTu32"%s\n",
	    "%s\t\"%s\": %"FMTu64"%s\n"};
	/*
	 * The comma is dropped once every uint32_t counter has been
	 * emitted; NOTE(review): this relies on the uint32_t counters
	 * coming last in MUTEX_PROF_COUNTERS — confirm against
	 * mutex_prof.h.
	 */
#define OP(c, t)							\
	malloc_cprintf(write_cb, cbopaque,				\
	    fmt_str[sizeof(t) / sizeof(uint32_t) - 1],			\
	    json_indent, #c, (t)stats_##t[mutex_counter_##c],		\
	    (++k_##t && k_uint32_t == mutex_prof_num_uint32_t_counters) ? "" : ",");
	MUTEX_PROF_COUNTERS
#undef OP

	malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent,
	    last ? "" : ",");
}
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
static void
|
2010-03-04 09:45:38 +08:00
|
|
|
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-03-22 02:56:38 +08:00
|
|
|
bool json, bool large, bool mutex, unsigned i) {
|
2012-04-02 22:15:42 +08:00
|
|
|
size_t page;
|
2017-01-18 17:01:19 +08:00
|
|
|
bool in_gap, in_gap_prev;
|
2014-10-13 13:53:59 +08:00
|
|
|
unsigned nbins, j;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2012-04-02 22:15:42 +08:00
|
|
|
CTL_GET("arenas.page", &page, size_t);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.nbins", &nbins, unsigned);
|
|
|
|
if (json) {
|
2010-03-04 09:45:38 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-11-01 13:30:49 +08:00
|
|
|
"\t\t\t\t\"bins\": [\n");
|
2010-01-28 05:10:55 +08:00
|
|
|
} else {
|
2017-04-19 06:00:14 +08:00
|
|
|
char *mutex_counters = " n_lock_ops n_waiting"
|
2017-10-20 03:01:20 +08:00
|
|
|
" n_spin_acq n_owner_switch total_wait_ns"
|
|
|
|
" max_wait_ns max_n_thds\n";
|
2017-04-21 08:21:37 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"bins: size ind allocated nmalloc"
|
|
|
|
" ndalloc nrequests curregs curslabs regs"
|
|
|
|
" pgs util nfills nflushes newslabs"
|
|
|
|
" reslabs%s", mutex ? mutex_counters : "\n");
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
2014-10-13 13:53:59 +08:00
|
|
|
for (j = 0, in_gap = false; j < nbins; j++) {
|
2016-05-30 09:34:50 +08:00
|
|
|
uint64_t nslabs;
|
2016-11-01 13:30:49 +08:00
|
|
|
size_t reg_size, slab_size, curregs;
|
|
|
|
size_t curslabs;
|
|
|
|
uint32_t nregs;
|
|
|
|
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
|
|
|
|
uint64_t nreslabs;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-05-30 09:34:50 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
|
2015-03-21 09:08:10 +08:00
|
|
|
uint64_t);
|
2016-11-01 13:30:49 +08:00
|
|
|
in_gap_prev = in_gap;
|
2016-11-02 06:26:35 +08:00
|
|
|
in_gap = (nslabs == 0);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (!json && in_gap_prev && !in_gap) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
" ---\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t);
|
|
|
|
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
|
|
|
|
CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
|
|
|
|
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
|
|
|
|
uint64_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
|
|
|
|
uint64_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
|
|
|
|
size_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
|
|
|
|
&nrequests, uint64_t);
|
2017-04-21 08:21:37 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
|
|
|
|
uint64_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
|
|
|
|
uint64_t);
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
|
|
|
|
uint64_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
|
|
|
|
size_t);
|
2017-03-11 04:14:05 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t{\n"
|
|
|
|
"\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
|
|
|
|
"\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
|
|
|
|
"\t\t\t\t\t\t\"curregs\": %zu,\n"
|
2017-04-21 08:21:37 +08:00
|
|
|
"\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n"
|
|
|
|
"\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
|
|
|
|
"\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n"
|
2016-11-01 13:30:49 +08:00
|
|
|
"\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
|
2017-03-22 02:56:38 +08:00
|
|
|
"\t\t\t\t\t\t\"curslabs\": %zu%s\n",
|
2017-04-21 08:21:37 +08:00
|
|
|
nmalloc, ndalloc, curregs, nrequests, nfills,
|
|
|
|
nflushes, nreslabs, curslabs, mutex ? "," : "");
|
2017-03-22 02:56:38 +08:00
|
|
|
if (mutex) {
|
2017-12-31 06:31:34 +08:00
|
|
|
uint64_t mutex_stats_64[mutex_prof_num_uint64_t_counters];
|
|
|
|
uint32_t mutex_stats_32[mutex_prof_num_uint32_t_counters];
|
|
|
|
read_arena_bin_mutex_stats(i, j, mutex_stats_64, mutex_stats_32);
|
2017-03-22 02:56:38 +08:00
|
|
|
mutex_stats_output_json(write_cb, cbopaque,
|
2017-12-31 06:31:34 +08:00
|
|
|
"mutex", mutex_stats_64, mutex_stats_32, "\t\t\t\t\t\t", true);
|
2017-03-12 17:28:52 +08:00
|
|
|
}
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t}%s\n",
|
2016-11-01 13:30:49 +08:00
|
|
|
(j + 1 < nbins) ? "," : "");
|
|
|
|
} else if (!in_gap) {
|
2017-03-04 11:58:43 +08:00
|
|
|
size_t availregs = nregs * curslabs;
|
|
|
|
char util[6];
|
|
|
|
if (get_rate_str((uint64_t)curregs, (uint64_t)availregs,
|
|
|
|
util)) {
|
|
|
|
if (availregs == 0) {
|
|
|
|
malloc_snprintf(util, sizeof(util),
|
|
|
|
"1");
|
|
|
|
} else if (curregs > availregs) {
|
|
|
|
/*
|
|
|
|
* Race detected: the counters were read
|
|
|
|
* in separate mallctl calls and
|
|
|
|
* concurrent operations happened in
|
|
|
|
* between. In this case no meaningful
|
|
|
|
* utilization can be computed.
|
|
|
|
*/
|
|
|
|
malloc_snprintf(util, sizeof(util),
|
|
|
|
" race");
|
|
|
|
} else {
|
|
|
|
not_reached();
|
|
|
|
}
|
|
|
|
}
|
2017-12-31 06:31:34 +08:00
|
|
|
uint64_t mutex_stats_64[mutex_prof_num_uint64_t_counters];
|
|
|
|
uint32_t mutex_stats_32[mutex_prof_num_uint32_t_counters];
|
2017-04-19 06:00:14 +08:00
|
|
|
if (mutex) {
|
2017-12-31 06:31:34 +08:00
|
|
|
read_arena_bin_mutex_stats(i, j, mutex_stats_64, mutex_stats_32);
|
2017-04-19 06:00:14 +08:00
|
|
|
}
|
2017-03-12 17:28:52 +08:00
|
|
|
|
2017-04-21 08:21:37 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"
|
|
|
|
FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u"
|
|
|
|
" %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64
|
|
|
|
" %12"FMTu64, reg_size, j, curregs * reg_size,
|
|
|
|
nmalloc, ndalloc, nrequests, curregs, curslabs,
|
|
|
|
nregs, slab_size / page, util, nfills, nflushes,
|
|
|
|
nslabs, nreslabs);
|
2017-05-31 06:56:01 +08:00
|
|
|
|
2017-04-19 06:00:14 +08:00
|
|
|
if (mutex) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
" %12"FMTu64" %12"FMTu64" %12"FMTu64
|
2017-10-20 03:01:20 +08:00
|
|
|
" %14"FMTu64" %14"FMTu64" %12"FMTu64
|
2017-12-31 06:31:34 +08:00
|
|
|
" %10"FMTu32"\n",
|
|
|
|
mutex_stats_64[mutex_counter_num_ops],
|
|
|
|
mutex_stats_64[mutex_counter_num_wait],
|
|
|
|
mutex_stats_64[mutex_counter_num_spin_acq],
|
|
|
|
mutex_stats_64[mutex_counter_num_owner_switch],
|
|
|
|
mutex_stats_64[mutex_counter_total_wait_time],
|
|
|
|
mutex_stats_64[mutex_counter_max_wait_time],
|
|
|
|
mutex_stats_32[mutex_counter_max_num_thds]);
|
2017-04-19 06:00:14 +08:00
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque, "\n");
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
2014-10-13 13:53:59 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-11-01 13:30:49 +08:00
|
|
|
"\t\t\t\t]%s\n", large ? "," : "");
|
|
|
|
} else {
|
|
|
|
if (in_gap) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
" ---\n");
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-13 13:53:59 +08:00
|
|
|
static void
|
2016-06-01 05:50:21 +08:00
|
|
|
stats_arena_lextents_print(void (*write_cb)(void *, const char *),
|
2017-01-16 08:56:30 +08:00
|
|
|
void *cbopaque, bool json, unsigned i) {
|
2016-06-01 05:50:21 +08:00
|
|
|
unsigned nbins, nlextents, j;
|
2016-11-01 13:30:49 +08:00
|
|
|
bool in_gap, in_gap_prev;
|
2014-10-13 13:53:59 +08:00
|
|
|
|
|
|
|
CTL_GET("arenas.nbins", &nbins, unsigned);
|
2016-06-01 05:50:21 +08:00
|
|
|
CTL_GET("arenas.nlextents", &nlextents, unsigned);
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\"lextents\": [\n");
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"large: size ind allocated nmalloc"
|
|
|
|
" ndalloc nrequests curlextents\n");
|
|
|
|
}
|
2016-06-01 05:50:21 +08:00
|
|
|
for (j = 0, in_gap = false; j < nlextents; j++) {
|
2014-10-13 13:53:59 +08:00
|
|
|
uint64_t nmalloc, ndalloc, nrequests;
|
2016-06-01 05:50:21 +08:00
|
|
|
size_t lextent_size, curlextents;
|
2014-10-13 13:53:59 +08:00
|
|
|
|
2016-06-01 05:50:21 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
|
2015-03-21 09:08:10 +08:00
|
|
|
&nmalloc, uint64_t);
|
2016-06-01 05:50:21 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
|
2015-03-21 09:08:10 +08:00
|
|
|
&ndalloc, uint64_t);
|
2016-06-01 05:50:21 +08:00
|
|
|
CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
|
2015-03-21 09:08:10 +08:00
|
|
|
&nrequests, uint64_t);
|
2016-11-01 13:30:49 +08:00
|
|
|
in_gap_prev = in_gap;
|
2016-11-02 06:26:35 +08:00
|
|
|
in_gap = (nrequests == 0);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
if (!json && in_gap_prev && !in_gap) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
" ---\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
|
|
|
|
CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
|
|
|
|
&curlextents, size_t);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t{\n"
|
|
|
|
"\t\t\t\t\t\t\"curlextents\": %zu\n"
|
|
|
|
"\t\t\t\t\t}%s\n",
|
|
|
|
curlextents,
|
|
|
|
(j + 1 < nlextents) ? "," : "");
|
|
|
|
} else if (!in_gap) {
|
2010-03-04 09:45:38 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2015-07-24 04:56:25 +08:00
|
|
|
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
|
|
|
|
" %12"FMTu64" %12zu\n",
|
2016-06-01 05:50:21 +08:00
|
|
|
lextent_size, nbins + j,
|
|
|
|
curlextents * lextent_size, nmalloc, ndalloc,
|
|
|
|
nrequests, curlextents);
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
2014-10-13 13:53:59 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-11-01 13:30:49 +08:00
|
|
|
"\t\t\t\t]\n");
|
|
|
|
} else {
|
|
|
|
if (in_gap) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
" ---\n");
|
|
|
|
}
|
2014-10-13 13:53:59 +08:00
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
2017-03-11 04:14:05 +08:00
|
|
|
/*
 * Read all mutex profiling counters for every arena mutex of arena
 * arena_ind via mallctl, filling one row per mutex in each output
 * array.  The MUTEX_PROF_COUNTERS X-macro emits one
 * gen_mutex_ctl_str + CTL_M2_GET pair per counter; the counter's type
 * t is token-pasted to select the matching results array.
 */
static void
read_arena_mutex_stats(unsigned arena_ind,
    uint64_t results_uint64_t[mutex_prof_num_arena_mutexes][mutex_prof_num_uint64_t_counters],
    uint32_t results_uint32_t[mutex_prof_num_arena_mutexes][mutex_prof_num_uint32_t_counters]) {
	char cmd[MUTEX_CTL_STR_MAX_LENGTH];

	mutex_prof_arena_ind_t i;
	for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
#define OP(c, t)							\
		gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH,	\
		    "arenas.0.mutexes", arena_mutex_names[i], #c);	\
		CTL_M2_GET(cmd, arena_ind,				\
		    (t *)&results_##t[i][mutex_counter_##c], t);
		MUTEX_PROF_COUNTERS
#undef OP
	}
}
|
|
|
|
|
2017-03-12 17:28:52 +08:00
|
|
|
/*
 * Print one mutex's profiling counters as a row of the plain-text
 * table.  When first_mutex is true, a column-header line is printed
 * before the row.
 */
static void
mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *name, uint64_t stats_uint64_t[mutex_prof_num_uint64_t_counters],
    uint32_t stats_uint32_t[mutex_prof_num_uint32_t_counters],
    bool first_mutex) {
	if (first_mutex) {
		/* Print title. */
		malloc_cprintf(write_cb, cbopaque,
		    " n_lock_ops n_waiting"
		    " n_spin_acq n_owner_switch total_wait_ns"
		    " max_wait_ns max_n_thds\n");
	}

	/* Pad the mutex name out to a 20-character label column. */
	malloc_cprintf(write_cb, cbopaque, "%s", name);
	malloc_cprintf(write_cb, cbopaque, ":%*c",
	    (int)(20 - strlen(name)), ' ');

	/* Indexed by sizeof(t)/sizeof(uint32_t) - 1: [0] -> u32, [1] -> u64. */
	char *fmt_str[2] = {"%12"FMTu32, "%16"FMTu64};
#define OP(c, t)							\
	malloc_cprintf(write_cb, cbopaque,				\
	    fmt_str[sizeof(t) / sizeof(uint32_t) - 1],			\
	    (t)stats_##t[mutex_counter_##c]);
	MUTEX_PROF_COUNTERS
#undef OP
	malloc_cprintf(write_cb, cbopaque, "\n");
}
|
|
|
|
|
2017-03-11 04:14:05 +08:00
|
|
|
static void
|
2017-03-14 08:29:03 +08:00
|
|
|
stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
|
2017-03-12 17:28:52 +08:00
|
|
|
void *cbopaque, bool json, bool json_end, unsigned arena_ind) {
|
2017-12-31 06:31:34 +08:00
|
|
|
uint64_t mutex_stats_64[mutex_prof_num_arena_mutexes][mutex_prof_num_uint64_t_counters];
|
|
|
|
uint32_t mutex_stats_32[mutex_prof_num_arena_mutexes][mutex_prof_num_uint32_t_counters];
|
|
|
|
read_arena_mutex_stats(arena_ind, mutex_stats_64, mutex_stats_32);
|
2017-03-11 04:14:05 +08:00
|
|
|
|
2017-03-14 08:29:03 +08:00
|
|
|
/* Output mutex stats. */
|
2017-03-11 04:14:05 +08:00
|
|
|
if (json) {
|
2017-03-14 08:29:03 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n");
|
2017-04-25 08:09:56 +08:00
|
|
|
mutex_prof_arena_ind_t i, last_mutex;
|
|
|
|
last_mutex = mutex_prof_num_arena_mutexes - 1;
|
|
|
|
for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex_stats_output_json(write_cb, cbopaque,
|
2017-12-31 06:31:34 +08:00
|
|
|
arena_mutex_names[i], mutex_stats_64[i], mutex_stats_32[i],
|
2017-03-22 02:56:38 +08:00
|
|
|
"\t\t\t\t\t", (i == last_mutex));
|
2017-03-12 17:28:52 +08:00
|
|
|
}
|
|
|
|
malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n",
|
|
|
|
json_end ? "" : ",");
|
2017-03-11 04:14:05 +08:00
|
|
|
} else {
|
2017-04-25 08:09:56 +08:00
|
|
|
mutex_prof_arena_ind_t i;
|
|
|
|
for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex_stats_output(write_cb, cbopaque,
|
2017-12-31 06:31:34 +08:00
|
|
|
arena_mutex_names[i], mutex_stats_64[i], mutex_stats_32[i], i == 0);
|
2017-03-11 04:14:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
/*
 * Print statistics for the arena with index i, reading every value via the
 * mallctl "stats.arenas.<i>.*" namespace.  Output goes through write_cb /
 * cbopaque in either JSON (json == true) or human-readable table form.
 * The bins/large/mutex flags select which optional sub-sections follow the
 * arena summary.  Emission order is significant: in JSON mode each key is
 * printed with a trailing comma except the last one in its object.
 */
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json, unsigned i, bool bins, bool large, bool mutex) {
	unsigned nthreads;
	const char *dss;
	ssize_t dirty_decay_ms, muzzy_decay_ms;
	size_t page, pactive, pdirty, pmuzzy, mapped, retained;
	size_t base, internal, resident, metadata_thp;
	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t tcache_bytes;
	uint64_t uptime;

	/* Page size is needed below to convert page counts to bytes. */
	CTL_GET("arenas.page", &page, size_t);

	/* Number of threads currently assigned to this arena. */
	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"nthreads\": %u,\n", nthreads);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "assigned threads: %u\n", nthreads);
	}

	/* Arena uptime, in nanoseconds. */
	CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"uptime_ns\": %"FMTu64",\n", uptime);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "uptime: %"FMTu64"\n", uptime);
	}

	/* dss (sbrk) allocation precedence setting for this arena. */
	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dss\": \"%s\",\n", dss);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "dss allocation precedence: %s\n", dss);
	}

	/*
	 * Two-phase decay statistics: dirty pages are lazily purged to
	 * "muzzy", and muzzy pages are purged to clean.  Read all counters
	 * up front, then print them grouped by output mode.
	 */
	CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
	CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_decay_ms\": %zd,\n", dirty_decay_ms);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_decay_ms\": %zd,\n", muzzy_decay_ms);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pactive\": %zu,\n", pactive);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "decaying: time npages sweeps madvises"
		    " purged\n");
		/* A negative decay time means decay is disabled ("N/A"). */
		if (dirty_decay_ms >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", dirty_decay_ms, pdirty, dirty_npurge,
			    dirty_nmadvise, dirty_purged);
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise,
			    dirty_purged);
		}
		if (muzzy_decay_ms >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", muzzy_decay_ms, pmuzzy, muzzy_npurge,
			    muzzy_nmadvise, muzzy_purged);
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise,
			    muzzy_purged);
		}
	}

	/* Small (bin-backed) allocation totals. */
	CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
	    uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"small\": {\n");

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t},\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    " allocated nmalloc"
		    " ndalloc nrequests\n");
		malloc_cprintf(write_cb, cbopaque,
		    "small: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    small_allocated, small_nmalloc, small_ndalloc,
		    small_nrequests);
	}

	/* Large allocation totals, plus a combined small+large total row. */
	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
	    uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"large\": {\n");

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t},\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "large: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    large_allocated, large_nmalloc, large_ndalloc,
		    large_nrequests);
		malloc_cprintf(write_cb, cbopaque,
		    "total: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    small_allocated + large_allocated, small_nmalloc +
		    large_nmalloc, small_ndalloc + large_ndalloc,
		    small_nrequests + large_nrequests);
	}
	/* Active bytes (pages * page size) only appear in table mode. */
	if (!json) {
		malloc_cprintf(write_cb, cbopaque,
		    "active: %12zu\n", pactive * page);
	}

	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"mapped\": %zu,\n", mapped);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "mapped: %12zu\n", mapped);
	}

	CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"retained\": %zu,\n", retained);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "retained: %12zu\n", retained);
	}

	CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"base\": %zu,\n", base);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "base: %12zu\n", base);
	}

	CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"internal\": %zu,\n", internal);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "internal: %12zu\n", internal);
	}

	CTL_M2_GET("stats.arenas.0.metadata_thp", i, &metadata_thp, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"metadata_thp\": %zu,\n", metadata_thp);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "metadata_thp: %12zu\n", metadata_thp);
	}

	CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "tcache: %12zu\n", tcache_bytes);
	}

	CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
	if (json) {
		/*
		 * "resident" is the last unconditional JSON key, so its
		 * trailing comma depends on whether any optional sub-section
		 * (mutexes/bins/large) follows.
		 */
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"resident\": %zu%s\n", resident,
		    (bins || large || mutex) ? "," : "");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "resident: %12zu\n", resident);
	}

	/* Optional sub-sections, in fixed order: mutexes, bins, large. */
	if (mutex) {
		/*
		 * The mutexes object is the final JSON member only when
		 * neither bins nor large output follows it.
		 */
		stats_arena_mutexes_print(write_cb, cbopaque, json,
		    !(bins || large), i);
	}
	if (bins) {
		stats_arena_bins_print(write_cb, cbopaque, json, large, mutex,
		    i);
	}
	if (large) {
		stats_arena_lextents_print(write_cb, cbopaque, json, i);
	}
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
static void
|
2018-03-02 09:38:15 +08:00
|
|
|
stats_general_print(emitter_t *emitter, bool more) {
|
|
|
|
/*
|
|
|
|
* These should eventually be deleted; they are useful in converting
|
|
|
|
* from manual to emitter-based stats output, though.
|
|
|
|
*/
|
|
|
|
void (*write_cb)(void *, const char *) = emitter->write_cb;
|
|
|
|
void *cbopaque = emitter->cbopaque;
|
|
|
|
bool json = (emitter->output == emitter_output_json);
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
const char *cpv;
|
2018-03-02 09:38:15 +08:00
|
|
|
bool bv, bv2;
|
2016-11-01 13:30:49 +08:00
|
|
|
unsigned uv;
|
|
|
|
uint32_t u32v;
|
|
|
|
uint64_t u64v;
|
2018-03-02 09:38:15 +08:00
|
|
|
ssize_t ssv, ssv2;
|
2016-11-01 13:30:49 +08:00
|
|
|
size_t sv, bsz, usz, ssz, sssz, cpsz;
|
|
|
|
|
|
|
|
bsz = sizeof(bool);
|
|
|
|
usz = sizeof(unsigned);
|
|
|
|
ssz = sizeof(size_t);
|
|
|
|
sssz = sizeof(ssize_t);
|
|
|
|
cpsz = sizeof(const char *);
|
|
|
|
|
|
|
|
CTL_GET("version", &cpv, const char *);
|
2018-03-02 09:38:15 +08:00
|
|
|
emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* config. */
|
2018-03-02 09:38:15 +08:00
|
|
|
emitter_dict_begin(emitter, "config", "Build-time option settings");
|
|
|
|
#define CONFIG_WRITE_BOOL(name) \
|
|
|
|
do { \
|
|
|
|
CTL_GET("config."#name, &bv, bool); \
|
|
|
|
emitter_kv(emitter, #name, "config."#name, \
|
|
|
|
emitter_type_bool, &bv); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
CONFIG_WRITE_BOOL(cache_oblivious);
|
|
|
|
CONFIG_WRITE_BOOL(debug);
|
|
|
|
CONFIG_WRITE_BOOL(fill);
|
|
|
|
CONFIG_WRITE_BOOL(lazy_lock);
|
|
|
|
emitter_kv(emitter, "malloc_conf", "config.malloc_conf",
|
|
|
|
emitter_type_string, &config_malloc_conf);
|
|
|
|
|
|
|
|
CONFIG_WRITE_BOOL(prof);
|
|
|
|
CONFIG_WRITE_BOOL(prof_libgcc);
|
|
|
|
CONFIG_WRITE_BOOL(prof_libunwind);
|
|
|
|
CONFIG_WRITE_BOOL(stats);
|
|
|
|
CONFIG_WRITE_BOOL(utrace);
|
|
|
|
CONFIG_WRITE_BOOL(xmalloc);
|
|
|
|
#undef CONFIG_WRITE_BOOL
|
|
|
|
emitter_dict_end(emitter); /* Close "config" dict. */
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* opt. */
|
2018-03-02 09:38:15 +08:00
|
|
|
#define OPT_WRITE(name, var, size, emitter_type) \
|
|
|
|
if (je_mallctl("opt."#name, (void *)&var, &size, NULL, 0) == \
|
|
|
|
0) { \
|
|
|
|
emitter_kv(emitter, #name, "opt."#name, emitter_type, \
|
|
|
|
&var); \
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2010-10-24 09:37:06 +08:00
|
|
|
|
2018-03-02 09:38:15 +08:00
|
|
|
#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \
|
|
|
|
altname) \
|
|
|
|
if (je_mallctl("opt."#name, (void *)&var1, &size, NULL, 0) == \
|
|
|
|
0 && je_mallctl(#altname, (void *)&var2, &size, NULL, 0) \
|
|
|
|
== 0) { \
|
|
|
|
emitter_kv_note(emitter, #name, "opt."#name, \
|
|
|
|
emitter_type, &var1, #altname, emitter_type, \
|
|
|
|
&var2); \
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2010-10-24 09:37:06 +08:00
|
|
|
|
2018-03-02 09:38:15 +08:00
|
|
|
#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool)
|
|
|
|
#define OPT_WRITE_BOOL_MUTABLE(name, altname) \
|
|
|
|
OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname)
|
|
|
|
|
|
|
|
#define OPT_WRITE_UNSIGNED(name) \
|
|
|
|
OPT_WRITE(name, uv, usz, emitter_type_unsigned)
|
|
|
|
|
|
|
|
#define OPT_WRITE_SSIZE_T(name) \
|
|
|
|
OPT_WRITE(name, ssv, sssz, emitter_type_ssize)
|
|
|
|
#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \
|
|
|
|
OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \
|
|
|
|
altname)
|
|
|
|
|
|
|
|
#define OPT_WRITE_CHAR_P(name) \
|
|
|
|
OPT_WRITE(name, cpv, cpsz, emitter_type_string)
|
|
|
|
|
|
|
|
emitter_dict_begin(emitter, "opt", "Run-time option settings");
|
|
|
|
|
|
|
|
OPT_WRITE_BOOL(abort)
|
|
|
|
OPT_WRITE_BOOL(abort_conf)
|
|
|
|
OPT_WRITE_BOOL(retain)
|
|
|
|
OPT_WRITE_CHAR_P(dss)
|
|
|
|
OPT_WRITE_UNSIGNED(narenas)
|
|
|
|
OPT_WRITE_CHAR_P(percpu_arena)
|
|
|
|
OPT_WRITE_CHAR_P(metadata_thp)
|
|
|
|
OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread)
|
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms)
|
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms)
|
|
|
|
OPT_WRITE_UNSIGNED(lg_extent_max_active_fit)
|
|
|
|
OPT_WRITE_CHAR_P(junk)
|
|
|
|
OPT_WRITE_BOOL(zero)
|
|
|
|
OPT_WRITE_BOOL(utrace)
|
|
|
|
OPT_WRITE_BOOL(xmalloc)
|
|
|
|
OPT_WRITE_BOOL(tcache)
|
|
|
|
OPT_WRITE_SSIZE_T(lg_tcache_max)
|
|
|
|
OPT_WRITE_CHAR_P(thp)
|
|
|
|
OPT_WRITE_BOOL(prof)
|
|
|
|
OPT_WRITE_CHAR_P(prof_prefix)
|
|
|
|
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
|
|
|
|
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init)
|
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample)
|
|
|
|
OPT_WRITE_BOOL(prof_accum)
|
|
|
|
OPT_WRITE_SSIZE_T(lg_prof_interval)
|
|
|
|
OPT_WRITE_BOOL(prof_gdump)
|
|
|
|
OPT_WRITE_BOOL(prof_final)
|
|
|
|
OPT_WRITE_BOOL(prof_leak)
|
|
|
|
OPT_WRITE_BOOL(stats_print)
|
|
|
|
OPT_WRITE_CHAR_P(stats_print_opts)
|
|
|
|
|
|
|
|
emitter_dict_end(emitter);
|
|
|
|
|
|
|
|
#undef OPT_WRITE
|
|
|
|
#undef OPT_WRITE_MUTABLE
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef OPT_WRITE_BOOL
|
2014-10-04 14:25:30 +08:00
|
|
|
#undef OPT_WRITE_BOOL_MUTABLE
|
2017-12-15 03:14:08 +08:00
|
|
|
#undef OPT_WRITE_UNSIGNED
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef OPT_WRITE_SSIZE_T
|
2017-12-15 03:14:08 +08:00
|
|
|
#undef OPT_WRITE_SSIZE_T_MUTABLE
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef OPT_WRITE_CHAR_P
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* arenas. */
|
2018-03-02 10:02:42 +08:00
|
|
|
/*
|
|
|
|
* The json output sticks arena info into an "arenas" dict; the table
|
|
|
|
* output puts them at the top-level.
|
|
|
|
*/
|
|
|
|
emitter_json_dict_begin(emitter, "arenas");
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.narenas", &uv, unsigned);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
/*
|
|
|
|
* Decay settings are emitted only in json mode; in table mode, they're
|
|
|
|
* emitted as notes with the opt output, above.
|
|
|
|
*/
|
|
|
|
CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
|
|
|
|
emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
|
|
|
|
emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.quantum", &sv, size_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.page", &sv, size_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);
|
2012-04-02 22:04:34 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_kv(emitter, "tcache_max",
|
|
|
|
"Maximum thread-cached size class", emitter_type_size, &sv);
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
unsigned nbins;
|
|
|
|
CTL_GET("arenas.nbins", &nbins, unsigned);
|
|
|
|
emitter_kv(emitter, "nbins", "Number of bin size classes",
|
|
|
|
emitter_type_unsigned, &nbins);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
unsigned nhbins;
|
|
|
|
CTL_GET("arenas.nhbins", &nhbins, unsigned);
|
|
|
|
emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
|
|
|
|
emitter_type_unsigned, &nhbins);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
/*
|
|
|
|
* We do enough mallctls in a loop that we actually want to omit them
|
|
|
|
* (not just omit the printing).
|
|
|
|
*/
|
|
|
|
if (emitter->output == emitter_output_json) {
|
|
|
|
emitter_json_arr_begin(emitter, "bin");
|
|
|
|
for (unsigned i = 0; i < nbins; i++) {
|
|
|
|
emitter_json_arr_obj_begin(emitter);
|
2010-03-02 12:15:26 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_kv(emitter, "size", emitter_type_size,
|
|
|
|
&sv);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_kv(emitter, "nregs", emitter_type_uint32,
|
|
|
|
&u32v);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_kv(emitter, "slab_size", emitter_type_size,
|
|
|
|
&sv);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_arr_obj_end(emitter);
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_arr_end(emitter); /* Close "bin". */
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
unsigned nlextents;
|
|
|
|
CTL_GET("arenas.nlextents", &nlextents, unsigned);
|
|
|
|
emitter_kv(emitter, "nlextents", "Number of large size classes",
|
|
|
|
emitter_type_unsigned, &nlextents);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
if (emitter->output == emitter_output_json) {
|
|
|
|
emitter_json_arr_begin(emitter, "lextent");
|
|
|
|
for (unsigned i = 0; i < nlextents; i++) {
|
|
|
|
emitter_json_arr_obj_begin(emitter);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_kv(emitter, "size", emitter_type_size,
|
|
|
|
&sv);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_arr_obj_end(emitter);
|
2010-02-12 05:19:21 +08:00
|
|
|
}
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_arr_end(emitter); /* Close "lextent". */
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 10:02:42 +08:00
|
|
|
emitter_json_dict_end(emitter); /* Close "arenas" */
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
if (more || config_prof) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque, ",\n");
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque, "\n");
|
|
|
|
}
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* prof. */
|
2016-12-24 03:15:44 +08:00
|
|
|
if (config_prof && json) {
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"prof\": {\n");
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("prof.thread_active_init", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
|
|
|
|
"false");
|
|
|
|
|
|
|
|
CTL_GET("prof.active", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"active\": %s,\n", bv ? "true" : "false");
|
|
|
|
|
|
|
|
CTL_GET("prof.gdump", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
|
|
|
|
|
|
|
|
CTL_GET("prof.interval", &u64v, uint64_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"interval\": %"FMTu64",\n", u64v);
|
|
|
|
|
|
|
|
CTL_GET("prof.lg_sample", &ssv, ssize_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"lg_sample\": %zd\n", ssv);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-12-24 03:15:44 +08:00
|
|
|
"\t\t}%s\n", more ? "," : "");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-14 08:29:03 +08:00
|
|
|
static void
|
|
|
|
read_global_mutex_stats(
|
2017-12-31 06:31:34 +08:00
|
|
|
uint64_t results_uint64_t[mutex_prof_num_global_mutexes][mutex_prof_num_uint64_t_counters],
|
|
|
|
uint32_t results_uint32_t[mutex_prof_num_global_mutexes][mutex_prof_num_uint32_t_counters]) {
|
2017-03-22 02:56:38 +08:00
|
|
|
char cmd[MUTEX_CTL_STR_MAX_LENGTH];
|
|
|
|
|
2017-04-25 08:09:56 +08:00
|
|
|
mutex_prof_global_ind_t i;
|
|
|
|
for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
|
2017-03-22 02:56:38 +08:00
|
|
|
#define OP(c, t) \
|
|
|
|
gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
|
|
|
|
"mutexes", global_mutex_names[i], #c); \
|
2017-12-31 06:31:34 +08:00
|
|
|
CTL_GET(cmd, (t *)&results_##t[i][mutex_counter_##c], t);
|
2017-03-22 02:56:38 +08:00
|
|
|
MUTEX_PROF_COUNTERS
|
|
|
|
#undef OP
|
2017-03-12 12:28:31 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
static void
|
|
|
|
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-01-04 09:21:59 +08:00
|
|
|
bool json, bool merged, bool destroyed, bool unmerged, bool bins,
|
2017-03-14 08:29:03 +08:00
|
|
|
bool large, bool mutex) {
|
2017-08-26 04:24:49 +08:00
|
|
|
size_t allocated, active, metadata, metadata_thp, resident, mapped,
|
|
|
|
retained;
|
2017-05-13 03:30:33 +08:00
|
|
|
size_t num_background_threads;
|
|
|
|
uint64_t background_thread_num_runs, background_thread_run_interval;
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
CTL_GET("stats.allocated", &allocated, size_t);
|
|
|
|
CTL_GET("stats.active", &active, size_t);
|
|
|
|
CTL_GET("stats.metadata", &metadata, size_t);
|
2017-08-26 04:24:49 +08:00
|
|
|
CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("stats.resident", &resident, size_t);
|
|
|
|
CTL_GET("stats.mapped", &mapped, size_t);
|
|
|
|
CTL_GET("stats.retained", &retained, size_t);
|
2017-03-12 12:28:31 +08:00
|
|
|
|
2017-12-31 06:31:34 +08:00
|
|
|
uint64_t mutex_stats_uint64_t[mutex_prof_num_global_mutexes][mutex_prof_num_uint64_t_counters];
|
|
|
|
uint32_t mutex_stats_uint32_t[mutex_prof_num_global_mutexes][mutex_prof_num_uint32_t_counters];
|
2017-03-14 08:29:03 +08:00
|
|
|
if (mutex) {
|
2017-12-31 06:31:34 +08:00
|
|
|
read_global_mutex_stats(mutex_stats_uint64_t, mutex_stats_uint32_t);
|
2017-03-12 17:28:52 +08:00
|
|
|
}
|
2017-03-12 12:28:31 +08:00
|
|
|
|
2017-05-13 03:30:33 +08:00
|
|
|
if (have_background_thread) {
|
|
|
|
CTL_GET("stats.background_thread.num_threads",
|
|
|
|
&num_background_threads, size_t);
|
|
|
|
CTL_GET("stats.background_thread.num_runs",
|
|
|
|
&background_thread_num_runs, uint64_t);
|
|
|
|
CTL_GET("stats.background_thread.run_interval",
|
|
|
|
&background_thread_run_interval, uint64_t);
|
|
|
|
} else {
|
|
|
|
num_background_threads = 0;
|
|
|
|
background_thread_num_runs = 0;
|
|
|
|
background_thread_run_interval = 0;
|
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"stats\": {\n");
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"allocated\": %zu,\n", allocated);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"active\": %zu,\n", active);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"metadata\": %zu,\n", metadata);
|
2017-08-26 04:24:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"metadata_thp\": %zu,\n", metadata_thp);
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"resident\": %zu,\n", resident);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"mapped\": %zu,\n", mapped);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-05-13 03:30:33 +08:00
|
|
|
"\t\t\t\"retained\": %zu,\n", retained);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"background_thread\": {\n");
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\"num_threads\": %zu,\n", num_background_threads);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\"num_runs\": %"FMTu64",\n",
|
|
|
|
background_thread_num_runs);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\"run_interval\": %"FMTu64"\n",
|
|
|
|
background_thread_run_interval);
|
|
|
|
malloc_cprintf(write_cb, cbopaque, "\t\t\t}%s\n",
|
|
|
|
mutex ? "," : "");
|
|
|
|
|
2017-03-14 08:29:03 +08:00
|
|
|
if (mutex) {
|
2017-03-12 17:28:52 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-03-14 08:29:03 +08:00
|
|
|
"\t\t\t\"mutexes\": {\n");
|
2017-04-25 08:09:56 +08:00
|
|
|
mutex_prof_global_ind_t i;
|
|
|
|
for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex_stats_output_json(write_cb, cbopaque,
|
2017-12-31 06:31:34 +08:00
|
|
|
global_mutex_names[i], mutex_stats_uint64_t[i], mutex_stats_uint32_t[i],
|
2017-03-22 02:56:38 +08:00
|
|
|
"\t\t\t\t",
|
2017-04-25 08:09:56 +08:00
|
|
|
i == mutex_prof_num_global_mutexes - 1);
|
2017-03-12 17:28:52 +08:00
|
|
|
}
|
|
|
|
malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n");
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-03-12 17:28:52 +08:00
|
|
|
"\t\t}%s\n", (merged || unmerged || destroyed) ? "," : "");
|
2016-11-01 13:30:49 +08:00
|
|
|
} else {
|
2010-03-04 09:45:38 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-08-26 04:24:49 +08:00
|
|
|
"Allocated: %zu, active: %zu, metadata: %zu (n_thp %zu),"
|
2016-05-04 13:11:35 +08:00
|
|
|
" resident: %zu, mapped: %zu, retained: %zu\n",
|
2017-08-26 04:24:49 +08:00
|
|
|
allocated, active, metadata, metadata_thp, resident, mapped,
|
|
|
|
retained);
|
2017-05-13 03:30:33 +08:00
|
|
|
|
|
|
|
if (have_background_thread && num_background_threads > 0) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"Background threads: %zu, num_runs: %"FMTu64", "
|
|
|
|
"run_interval: %"FMTu64" ns\n",
|
|
|
|
num_background_threads,
|
|
|
|
background_thread_num_runs,
|
|
|
|
background_thread_run_interval);
|
|
|
|
}
|
2017-03-14 08:29:03 +08:00
|
|
|
if (mutex) {
|
2017-04-25 08:09:56 +08:00
|
|
|
mutex_prof_global_ind_t i;
|
|
|
|
for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex_stats_output(write_cb, cbopaque,
|
2017-12-31 06:31:34 +08:00
|
|
|
global_mutex_names[i], mutex_stats_uint64_t[i], mutex_stats_uint32_t[i],
|
2017-03-12 17:28:52 +08:00
|
|
|
i == 0);
|
|
|
|
}
|
2017-03-12 12:28:31 +08:00
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-01-04 09:21:59 +08:00
|
|
|
if (merged || destroyed || unmerged) {
|
2016-11-01 13:30:49 +08:00
|
|
|
unsigned narenas;
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"stats.arenas\": {\n");
|
|
|
|
}
|
2010-01-25 09:21:47 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.narenas", &narenas, unsigned);
|
|
|
|
{
|
2017-01-05 02:21:53 +08:00
|
|
|
size_t mib[3];
|
|
|
|
size_t miblen = sizeof(mib) / sizeof(size_t);
|
|
|
|
size_t sz;
|
2016-11-01 13:30:49 +08:00
|
|
|
VARIABLE_ARRAY(bool, initialized, narenas);
|
2017-01-04 09:21:59 +08:00
|
|
|
bool destroyed_initialized;
|
2016-11-01 13:30:49 +08:00
|
|
|
unsigned i, j, ninitialized;
|
|
|
|
|
2017-01-05 02:21:53 +08:00
|
|
|
xmallctlnametomib("arena.0.initialized", mib, &miblen);
|
2016-11-01 13:30:49 +08:00
|
|
|
for (i = ninitialized = 0; i < narenas; i++) {
|
2017-01-05 02:21:53 +08:00
|
|
|
mib[1] = i;
|
|
|
|
sz = sizeof(bool);
|
|
|
|
xmallctlbymib(mib, miblen, &initialized[i], &sz,
|
|
|
|
NULL, 0);
|
2017-01-16 08:56:30 +08:00
|
|
|
if (initialized[i]) {
|
2016-11-01 13:30:49 +08:00
|
|
|
ninitialized++;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2017-01-04 09:21:59 +08:00
|
|
|
mib[1] = MALLCTL_ARENAS_DESTROYED;
|
|
|
|
sz = sizeof(bool);
|
|
|
|
xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
|
|
|
|
NULL, 0);
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
/* Merged stats. */
|
|
|
|
if (merged && (ninitialized > 1 || !unmerged)) {
|
|
|
|
/* Print merged arena stats. */
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"merged\": {\n");
|
|
|
|
} else {
|
2010-03-04 09:45:38 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2010-01-31 19:57:29 +08:00
|
|
|
"\nMerged arenas stats:\n");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
stats_arena_print(write_cb, cbopaque, json,
|
2017-03-14 08:29:03 +08:00
|
|
|
MALLCTL_ARENAS_ALL, bins, large, mutex);
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-01-18 17:01:19 +08:00
|
|
|
"\t\t\t}%s\n",
|
|
|
|
((destroyed_initialized &&
|
|
|
|
destroyed) || unmerged) ? "," :
|
|
|
|
"");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-04 09:21:59 +08:00
|
|
|
/* Destroyed stats. */
|
|
|
|
if (destroyed_initialized && destroyed) {
|
|
|
|
/* Print destroyed arena stats. */
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"destroyed\": {\n");
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\nDestroyed arenas stats:\n");
|
|
|
|
}
|
|
|
|
stats_arena_print(write_cb, cbopaque, json,
|
2017-03-12 17:28:52 +08:00
|
|
|
MALLCTL_ARENAS_DESTROYED, bins, large,
|
2017-03-14 08:29:03 +08:00
|
|
|
mutex);
|
2017-01-04 09:21:59 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2017-01-18 17:01:19 +08:00
|
|
|
"\t\t\t}%s\n", unmerged ? "," :
|
|
|
|
"");
|
2017-01-04 09:21:59 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* Unmerged stats. */
|
2017-01-18 17:01:19 +08:00
|
|
|
if (unmerged) {
|
|
|
|
for (i = j = 0; i < narenas; i++) {
|
|
|
|
if (initialized[i]) {
|
|
|
|
if (json) {
|
|
|
|
j++;
|
|
|
|
malloc_cprintf(write_cb,
|
|
|
|
cbopaque,
|
|
|
|
"\t\t\t\"%u\": {\n",
|
|
|
|
i);
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb,
|
|
|
|
cbopaque,
|
|
|
|
"\narenas[%u]:\n",
|
|
|
|
i);
|
|
|
|
}
|
|
|
|
stats_arena_print(write_cb,
|
|
|
|
cbopaque, json, i, bins,
|
2017-03-14 08:29:03 +08:00
|
|
|
large, mutex);
|
2017-01-18 17:01:19 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb,
|
|
|
|
cbopaque,
|
|
|
|
"\t\t\t}%s\n", (j <
|
|
|
|
ninitialized) ? ","
|
|
|
|
: "");
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
2010-01-25 09:21:47 +08:00
|
|
|
}
|
2010-01-18 09:35:19 +08:00
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t}\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
void
|
|
|
|
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-01-16 08:56:30 +08:00
|
|
|
const char *opts) {
|
2016-11-01 13:30:49 +08:00
|
|
|
int err;
|
|
|
|
uint64_t epoch;
|
|
|
|
size_t u64sz;
|
2017-05-28 06:35:36 +08:00
|
|
|
#define OPTION(o, v, d, s) bool v = d;
|
|
|
|
STATS_PRINT_OPTIONS
|
|
|
|
#undef OPTION
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/*
|
|
|
|
* Refresh stats, in case mallctl() was called by the application.
|
|
|
|
*
|
|
|
|
* Check for OOM here, since refreshing the ctl cache can trigger
|
|
|
|
* allocation. In practice, none of the subsequent mallctl()-related
|
|
|
|
* calls in this function will cause OOM if this one succeeds.
|
|
|
|
* */
|
|
|
|
epoch = 1;
|
|
|
|
u64sz = sizeof(uint64_t);
|
2016-11-16 07:01:03 +08:00
|
|
|
err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
|
|
|
|
sizeof(uint64_t));
|
2016-11-01 13:30:49 +08:00
|
|
|
if (err != 0) {
|
|
|
|
if (err == EAGAIN) {
|
|
|
|
malloc_write("<jemalloc>: Memory allocation failure in "
|
|
|
|
"mallctl(\"epoch\", ...)\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
|
|
|
|
"...)\n");
|
|
|
|
abort();
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (opts != NULL) {
|
2017-05-28 06:35:36 +08:00
|
|
|
for (unsigned i = 0; opts[i] != '\0'; i++) {
|
2016-11-01 13:30:49 +08:00
|
|
|
switch (opts[i]) {
|
2017-05-28 06:35:36 +08:00
|
|
|
#define OPTION(o, v, d, s) case o: v = s; break;
|
|
|
|
STATS_PRINT_OPTIONS
|
|
|
|
#undef OPTION
|
2016-11-01 13:30:49 +08:00
|
|
|
default:;
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
|
2018-03-02 09:29:58 +08:00
|
|
|
emitter_t emitter;
|
|
|
|
emitter_init(&emitter,
|
|
|
|
json ? emitter_output_json : emitter_output_table, write_cb,
|
|
|
|
cbopaque);
|
|
|
|
emitter_begin(&emitter);
|
|
|
|
emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
|
|
|
|
emitter_json_dict_begin(&emitter, "jemalloc");
|
|
|
|
|
2016-12-24 03:15:44 +08:00
|
|
|
if (general) {
|
2018-03-02 09:38:15 +08:00
|
|
|
stats_general_print(&emitter, config_stats);
|
2016-12-24 03:15:44 +08:00
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
if (config_stats) {
|
2017-01-04 09:21:59 +08:00
|
|
|
stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
|
2017-03-14 08:29:03 +08:00
|
|
|
unmerged, bins, large, mutex);
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
2018-03-02 09:29:58 +08:00
|
|
|
emitter_json_dict_end(&emitter); /* Closes the "jemalloc" dict. */
|
|
|
|
emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
|
|
|
|
emitter_end(&emitter);
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|