2017-01-20 13:41:41 +08:00
|
|
|
#define JEMALLOC_STATS_C_
|
2010-02-12 06:45:59 +08:00
|
|
|
#include "jemalloc/internal/jemalloc_internal.h"
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/*
 * Read the mallctl value named n (of type t) into *v.  xmallctl() aborts on
 * failure, so call sites need no error handling.
 */
#define CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, (void *)v, &sz, NULL, 0);				\
} while (0)
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/*
 * Like CTL_GET, but substitutes i for MIB component 2 of name n (written
 * with a "0" placeholder, e.g. "stats.arenas.0.mapped") before the
 * by-MIB read.
 */
#define CTL_M2_GET(n, i, v, t) do {					\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
} while (0)
|
|
|
|
|
2017-01-20 13:41:41 +08:00
|
|
|
/*
 * Like CTL_M2_GET, but substitutes i and j for MIB components 2 and 4 of
 * name n (both written with "0" placeholders, e.g.
 * "stats.arenas.0.bins.0.nmalloc") before the by-MIB read.
 */
#define CTL_M2_M4_GET(n, i, j, v, t) do {				\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	mib[4] = (j);							\
	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
} while (0)
|
|
|
|
|
2010-01-17 01:53:50 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
/* Data. */
|
|
|
|
|
|
|
|
/*
 * Whether to print statistics at exit; presumably wired to the
 * "opt.stats_print" mallctl given the opt_ naming convention — confirm
 * against the ctl tables.
 */
bool opt_stats_print = false;
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
/*
 * Print small size class (bin) statistics for arena i, either as a JSON
 * "bins" array or as an aligned plain-text table.
 *
 * write_cb/cbopaque: output callback and its opaque argument.
 * json:  emit JSON rather than plain text.
 * large: in JSON mode, whether a "lextents" section follows (controls the
 *        trailing comma after the closing bracket).
 * i:     arena index, substituted into "stats.arenas.<i>..." mallctl names.
 *
 * In plain-text mode, runs of bins that have never had a slab are elided
 * and marked with a single "---" line.
 */
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json, bool large, unsigned i) {
	size_t page;
	bool in_gap, in_gap_prev;
	unsigned nbins, j;

	CTL_GET("arenas.page", &page, size_t);

	CTL_GET("arenas.nbins", &nbins, unsigned);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"bins\": [\n");
	} else {
		/* Table header; the nfills/nflushes columns need tcache. */
		if (config_tcache) {
			malloc_cprintf(write_cb, cbopaque,
			    "bins: size ind allocated nmalloc"
			    " ndalloc nrequests curregs"
			    " curslabs regs pgs util nfills"
			    " nflushes newslabs reslabs\n");
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "bins: size ind allocated nmalloc"
			    " ndalloc nrequests curregs"
			    " curslabs regs pgs util newslabs"
			    " reslabs\n");
		}
	}
	for (j = 0, in_gap = false; j < nbins; j++) {
		uint64_t nslabs;
		size_t reg_size, slab_size, curregs;
		size_t curslabs;
		uint32_t nregs;
		uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
		uint64_t nreslabs;

		CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
		    uint64_t);
		/* nslabs == 0: no slab ever allocated from this bin. */
		in_gap_prev = in_gap;
		in_gap = (nslabs == 0);

		/* Emit the gap marker when leaving a run of unused bins. */
		if (!json && in_gap_prev && !in_gap) {
			malloc_cprintf(write_cb, cbopaque,
			    " ---\n");
		}

		CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
		CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
		CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);

		CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
		    uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
		    uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
		    size_t);
		CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
		    &nrequests, uint64_t);
		if (config_tcache) {
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
			    &nfills, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
			    &nflushes, uint64_t);
		}
		CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
		    uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
		    size_t);

		if (json) {
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t\t\t\t{\n"
			    "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
			    "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
			    "\t\t\t\t\t\t\"curregs\": %zu,\n"
			    "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
			    nmalloc,
			    ndalloc,
			    curregs,
			    nrequests);
			if (config_tcache) {
				malloc_cprintf(write_cb, cbopaque,
				    "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
				    "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
				    nfills,
				    nflushes);
			}
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
			    "\t\t\t\t\t\t\"curslabs\": %zu\n"
			    "\t\t\t\t\t}%s\n",
			    nreslabs,
			    curslabs,
			    (j + 1 < nbins) ? "," : "");
		} else if (!in_gap) {
			size_t availregs, milli;
			char util[6]; /* "x.yyy". */

			/* Utilization in thousandths: curregs / available. */
			availregs = nregs * curslabs;
			milli = (availregs != 0) ? (1000 * curregs) / availregs
			    : 1000;

			if (milli > 1000) {
				/*
				 * Race detected: the counters were read in
				 * separate mallctl calls and concurrent
				 * operations happened in between. In this case
				 * no meaningful utilization can be computed.
				 */
				malloc_snprintf(util, sizeof(util), " race");
			} else if (milli < 10) {
				malloc_snprintf(util, sizeof(util),
				    "0.00%zu", milli);
			} else if (milli < 100) {
				malloc_snprintf(util, sizeof(util), "0.0%zu",
				    milli);
			} else if (milli < 1000) {
				malloc_snprintf(util, sizeof(util), "0.%zu",
				    milli);
			} else {
				assert(milli == 1000);
				malloc_snprintf(util, sizeof(util), "1");
			}

			if (config_tcache) {
				malloc_cprintf(write_cb, cbopaque,
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curslabs,
				    nregs, slab_size / page, util, nfills,
				    nflushes, nslabs, nreslabs);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curslabs,
				    nregs, slab_size / page, util, nslabs,
				    nreslabs);
			}
		}
	}
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t]%s\n", large ? "," : "");
	} else {
		/* Close out a gap that ran through the last bin. */
		if (in_gap) {
			malloc_cprintf(write_cb, cbopaque,
			    " ---\n");
		}
	}
}
|
|
|
|
|
2014-10-13 13:53:59 +08:00
|
|
|
/*
 * Print large size class (lextent) statistics for arena i, as either a
 * JSON "lextents" array or a plain-text table.  Plain-text size class
 * indices are offset by nbins so they continue the bin numbering.  Runs of
 * size classes with no requests are elided behind a "---" marker.
 */
static void
stats_arena_lextents_print(void (*write_cb)(void *, const char *),
    void *cbopaque, bool json, unsigned i) {
	unsigned nbins, nlextents, j;
	bool in_gap, in_gap_prev;

	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlextents", &nlextents, unsigned);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"lextents\": [\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "large: size ind allocated nmalloc"
		    " ndalloc nrequests curlextents\n");
	}
	for (j = 0, in_gap = false; j < nlextents; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t lextent_size, curlextents;

		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
		    &nmalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
		    &ndalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
		    &nrequests, uint64_t);
		/* nrequests == 0: this size class was never requested. */
		in_gap_prev = in_gap;
		in_gap = (nrequests == 0);

		/* Emit the gap marker when leaving a run of unused classes. */
		if (!json && in_gap_prev && !in_gap) {
			malloc_cprintf(write_cb, cbopaque,
			    " ---\n");
		}

		CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
		CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
		    &curlextents, size_t);
		if (json) {
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t\t\t\t{\n"
			    "\t\t\t\t\t\t\"curlextents\": %zu\n"
			    "\t\t\t\t\t}%s\n",
			    curlextents,
			    (j + 1 < nlextents) ? "," : "");
		} else if (!in_gap) {
			malloc_cprintf(write_cb, cbopaque,
			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
			    " %12"FMTu64" %12zu\n",
			    lextent_size, nbins + j,
			    curlextents * lextent_size, nmalloc, ndalloc,
			    nrequests, curlextents);
		}
	}
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t]\n");
	} else {
		/* Close out a gap that ran through the last size class. */
		if (in_gap) {
			malloc_cprintf(write_cb, cbopaque,
			    " ---\n");
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Print all statistics for arena i: thread assignment, dss precedence,
 * dirty/muzzy decay settings and purging activity, small/large allocation
 * totals, and memory footprint (active/mapped/retained/base/internal/
 * tcache/resident), in either JSON or plain text.  When bins/large are
 * set, the per-bin and per-lextent breakdowns are appended.
 */
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json, unsigned i, bool bins, bool large) {
	unsigned nthreads;
	const char *dss;
	ssize_t dirty_decay_time, muzzy_decay_time;
	size_t page, pactive, pdirty, pmuzzy, mapped, retained;
	size_t base, internal, resident;
	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t tcache_bytes;

	CTL_GET("arenas.page", &page, size_t);

	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"nthreads\": %u,\n", nthreads);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "assigned threads: %u\n", nthreads);
	}

	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dss\": \"%s\",\n", dss);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "dss allocation precedence: %s\n", dss);
	}

	/* Two-phase decay stats: dirty pages -> muzzy -> clean/unmapped. */
	CTL_M2_GET("stats.arenas.0.dirty_decay_time", i, &dirty_decay_time,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.muzzy_decay_time", i, &muzzy_decay_time,
	    ssize_t);
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
	CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
	    uint64_t);
	CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_decay_time\": %zd,\n", dirty_decay_time);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_decay_time\": %zd,\n", muzzy_decay_time);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pactive\": %zu,\n", pactive);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "decaying: time npages sweeps madvises"
		    " purged\n");
		/* A negative decay time means decay-based purging is off. */
		if (dirty_decay_time >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", dirty_decay_time, pdirty, dirty_npurge,
			    dirty_nmadvise, dirty_purged);
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise,
			    dirty_purged);
		}
		if (muzzy_decay_time >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", muzzy_decay_time, pmuzzy, muzzy_npurge,
			    muzzy_nmadvise, muzzy_purged);
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
			    FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise,
			    muzzy_purged);
		}
	}

	CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
	    uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"small\": {\n");

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t},\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    " allocated nmalloc"
		    " ndalloc nrequests\n");
		malloc_cprintf(write_cb, cbopaque,
		    "small: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    small_allocated, small_nmalloc, small_ndalloc,
		    small_nrequests);
	}

	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
	    uint64_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"large\": {\n");

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t},\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "large: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    large_allocated, large_nmalloc, large_ndalloc,
		    large_nrequests);
		/* "total" = small + large, computed here; no mallctl node. */
		malloc_cprintf(write_cb, cbopaque,
		    "total: %12zu %12"FMTu64" %12"FMTu64
		    " %12"FMTu64"\n",
		    small_allocated + large_allocated, small_nmalloc +
		    large_nmalloc, small_ndalloc + large_ndalloc,
		    small_nrequests + large_nrequests);
	}
	if (!json) {
		/* Active bytes = active pages times the page size. */
		malloc_cprintf(write_cb, cbopaque,
		    "active: %12zu\n", pactive * page);
	}

	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"mapped\": %zu,\n", mapped);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "mapped: %12zu\n", mapped);
	}

	CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"retained\": %zu,\n", retained);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "retained: %12zu\n", retained);
	}

	CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"base\": %zu,\n", base);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "base: %12zu\n", base);
	}

	CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"internal\": %zu,\n", internal);
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "internal: %12zu\n", internal);
	}

	/* tcache_bytes only exists when tcache support is compiled in. */
	if (config_tcache) {
		CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes,
		    size_t);
		if (json) {
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "tcache: %12zu\n", tcache_bytes);
		}
	}

	CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
	if (json) {
		/* Last scalar field; comma only if bins/lextents follow. */
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\t\"resident\": %zu%s\n", resident, (bins || large) ?
		    "," : "");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "resident: %12zu\n", resident);
	}

	if (bins) {
		stats_arena_bins_print(write_cb, cbopaque, json, large, i);
	}
	if (large) {
		stats_arena_lextents_print(write_cb, cbopaque, json, i);
	}
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
static void
|
|
|
|
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-01-16 08:56:30 +08:00
|
|
|
bool json, bool more) {
|
2016-11-01 13:30:49 +08:00
|
|
|
const char *cpv;
|
|
|
|
bool bv;
|
|
|
|
unsigned uv;
|
|
|
|
uint32_t u32v;
|
|
|
|
uint64_t u64v;
|
|
|
|
ssize_t ssv;
|
|
|
|
size_t sv, bsz, usz, ssz, sssz, cpsz;
|
|
|
|
|
|
|
|
bsz = sizeof(bool);
|
|
|
|
usz = sizeof(unsigned);
|
|
|
|
ssz = sizeof(size_t);
|
|
|
|
sssz = sizeof(ssize_t);
|
|
|
|
cpsz = sizeof(const char *);
|
|
|
|
|
|
|
|
CTL_GET("version", &cpv, const char *);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"version\": \"%s\",\n", cpv);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* config. */
|
2017-01-20 13:41:41 +08:00
|
|
|
#define CONFIG_WRITE_BOOL_JSON(n, c) \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) { \
|
|
|
|
CTL_GET("config."#n, &bv, bool); \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
|
|
|
|
(c)); \
|
2010-10-24 09:37:06 +08:00
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"config\": {\n");
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("config.debug", &bv, bool);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
|
|
|
|
} else {
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
|
|
|
|
bv ? "enabled" : "disabled");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CONFIG_WRITE_BOOL_JSON(fill, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"malloc_conf\": \"%s\",\n",
|
|
|
|
config_malloc_conf);
|
|
|
|
} else {
|
2016-02-08 06:23:22 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"config.malloc_conf: \"%s\"\n", config_malloc_conf);
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CONFIG_WRITE_BOOL_JSON(munmap, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(prof, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(stats, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(tcache, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(tls, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(utrace, ",")
|
|
|
|
CONFIG_WRITE_BOOL_JSON(xmalloc, "")
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t},\n");
|
|
|
|
}
|
|
|
|
#undef CONFIG_WRITE_BOOL_JSON
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* opt. */
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_BOOL(n, c) \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
|
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
|
|
|
|
"false", (c)); \
|
|
|
|
} else { \
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
" opt."#n": %s\n", bv ? "true" : "false"); \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
|
|
|
}
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
|
2016-11-01 13:30:49 +08:00
|
|
|
bool bv2; \
|
|
|
|
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
|
2016-11-16 07:01:03 +08:00
|
|
|
je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
|
|
|
|
"false", (c)); \
|
|
|
|
} else { \
|
2014-10-04 14:25:30 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
" opt."#n": %s ("#m": %s)\n", bv ? "true" \
|
|
|
|
: "false", bv2 ? "true" : "false"); \
|
|
|
|
} \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
2014-10-04 14:25:30 +08:00
|
|
|
}
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_UNSIGNED(n, c) \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
|
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %u%s\n", uv, (c)); \
|
|
|
|
} else { \
|
2016-02-25 03:03:40 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
2016-04-12 09:47:18 +08:00
|
|
|
" opt."#n": %u\n", uv); \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
|
|
|
}
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_SSIZE_T(n, c) \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
|
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
|
|
|
|
} else { \
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
2015-07-24 04:56:25 +08:00
|
|
|
" opt."#n": %zd\n", ssv); \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
|
|
|
}
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
|
2016-11-01 13:30:49 +08:00
|
|
|
ssize_t ssv2; \
|
|
|
|
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
|
2016-11-16 07:01:03 +08:00
|
|
|
je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
|
|
|
|
} else { \
|
2015-03-21 09:08:10 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
2015-07-24 04:56:25 +08:00
|
|
|
" opt."#n": %zd ("#m": %zd)\n", \
|
2015-07-08 04:12:05 +08:00
|
|
|
ssv, ssv2); \
|
2015-03-22 01:18:39 +08:00
|
|
|
} \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
2015-03-22 01:18:39 +08:00
|
|
|
}
|
2017-01-20 13:41:41 +08:00
|
|
|
#define OPT_WRITE_CHAR_P(n, c) \
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
|
|
|
|
if (json) { \
|
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
"\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
|
|
|
|
} else { \
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, \
|
|
|
|
" opt."#n": \"%s\"\n", cpv); \
|
2016-11-01 13:30:49 +08:00
|
|
|
} \
|
|
|
|
}
|
2010-10-24 09:37:06 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"opt\": {\n");
|
|
|
|
} else {
|
2012-05-02 19:15:00 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"Run-time option settings:\n");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
OPT_WRITE_BOOL(abort, ",")
|
|
|
|
OPT_WRITE_CHAR_P(dss, ",")
|
|
|
|
OPT_WRITE_UNSIGNED(narenas, ",")
|
Implement per-CPU arena.
The new feature, opt.percpu_arena, determines thread-arena association
dynamically based CPU id. Three modes are supported: "percpu", "phycpu"
and disabled.
"percpu" uses the current core id (with help from sched_getcpu())
directly as the arena index, while "phycpu" will assign threads on the
same physical CPU to the same arena. In other words, "percpu" means # of
arenas == # of CPUs, while "phycpu" has # of arenas == 1/2 * (# of
CPUs). Note that no runtime check on whether hyper threading is enabled
is added yet.
When enabled, threads will be migrated between arenas when a CPU change
is detected. In the current design, to reduce overhead from reading CPU
id, each arena tracks the thread accessed most recently. When a new
thread comes in, we will read CPU id and update arena if necessary.
2017-02-03 09:02:05 +08:00
|
|
|
OPT_WRITE_CHAR_P(percpu_arena, ",")
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_time, arenas.dirty_decay_time,
|
|
|
|
",")
|
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_time, arenas.muzzy_decay_time,
|
|
|
|
",")
|
2016-11-01 13:30:49 +08:00
|
|
|
OPT_WRITE_CHAR_P(junk, ",")
|
|
|
|
OPT_WRITE_BOOL(zero, ",")
|
|
|
|
OPT_WRITE_BOOL(utrace, ",")
|
|
|
|
OPT_WRITE_BOOL(xmalloc, ",")
|
|
|
|
OPT_WRITE_BOOL(tcache, ",")
|
|
|
|
OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
|
|
|
|
OPT_WRITE_BOOL(prof, ",")
|
|
|
|
OPT_WRITE_CHAR_P(prof_prefix, ",")
|
|
|
|
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
|
|
|
|
OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
|
|
|
|
",")
|
|
|
|
OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
|
|
|
|
OPT_WRITE_BOOL(prof_accum, ",")
|
|
|
|
OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
|
|
|
|
OPT_WRITE_BOOL(prof_gdump, ",")
|
|
|
|
OPT_WRITE_BOOL(prof_final, ",")
|
|
|
|
OPT_WRITE_BOOL(prof_leak, ",")
|
|
|
|
/*
|
|
|
|
* stats_print is always emitted, so as long as stats_print comes last
|
|
|
|
* it's safe to unconditionally omit the comma here (rather than having
|
|
|
|
* to conditionally omit it elsewhere depending on configuration).
|
|
|
|
*/
|
|
|
|
OPT_WRITE_BOOL(stats_print, "")
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t},\n");
|
|
|
|
}
|
2010-10-24 09:37:06 +08:00
|
|
|
|
|
|
|
#undef OPT_WRITE_BOOL
|
2014-10-04 14:25:30 +08:00
|
|
|
#undef OPT_WRITE_BOOL_MUTABLE
|
2010-10-24 09:37:06 +08:00
|
|
|
#undef OPT_WRITE_SSIZE_T
|
|
|
|
#undef OPT_WRITE_CHAR_P
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* arenas. */
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"arenas\": {\n");
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.narenas", &uv, unsigned);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"narenas\": %u,\n", uv);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2012-10-12 04:53:15 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
CTL_GET("arenas.dirty_decay_time", &ssv, ssize_t);
|
2016-11-01 13:30:49 +08:00
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
"\t\t\t\"dirty_decay_time\": %zd,\n", ssv);
|
2016-11-01 13:30:49 +08:00
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"Unused dirty page decay time: %zd%s\n", ssv, (ssv < 0) ?
|
|
|
|
" (no decay)" : "");
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
Implement two-phase decay-based purging.
Split decay-based purging into two phases, the first of which uses lazy
purging to convert dirty pages to "muzzy", and the second of which uses
forced purging, decommit, or unmapping to convert pages to clean or
destroy them altogether. Not all operating systems support lazy
purging, yet the application may provide extent hooks that implement
lazy purging, so care must be taken to dynamically omit the first phase
when necessary.
The mallctl interfaces change as follows:
- opt.decay_time --> opt.{dirty,muzzy}_decay_time
- arena.<i>.decay_time --> arena.<i>.{dirty,muzzy}_decay_time
- arenas.decay_time --> arenas.{dirty,muzzy}_decay_time
- stats.arenas.<i>.pdirty --> stats.arenas.<i>.p{dirty,muzzy}
- stats.arenas.<i>.{npurge,nmadvise,purged} -->
stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
This resolves #521.
2017-03-09 14:42:57 +08:00
|
|
|
CTL_GET("arenas.muzzy_decay_time", &ssv, ssize_t);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"muzzy_decay_time\": %zd,\n", ssv);
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"Unused muzzy page decay time: %zd%s\n", ssv, (ssv < 0) ?
|
|
|
|
" (no decay)" : "");
|
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.quantum", &sv, size_t);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"quantum\": %zu,\n", sv);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("arenas.page", &sv, size_t);
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"page\": %zu,\n", sv);
|
2017-01-16 08:56:30 +08:00
|
|
|
} else {
|
2015-07-24 04:56:25 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2012-04-02 22:04:34 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"tcache_max\": %zu,\n", sv);
|
|
|
|
} else {
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2015-07-24 04:56:25 +08:00
|
|
|
"Maximum thread-cached size class: %zu\n", sv);
|
2010-03-18 07:27:39 +08:00
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
unsigned nbins, nlextents, i;
|
|
|
|
|
|
|
|
CTL_GET("arenas.nbins", &nbins, unsigned);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"nbins\": %u,\n", nbins);
|
|
|
|
|
2017-01-18 17:01:19 +08:00
|
|
|
if (config_tcache) {
|
|
|
|
CTL_GET("arenas.nhbins", &uv, unsigned);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"nhbins\": %u,\n", uv);
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"bin\": [\n");
|
|
|
|
for (i = 0; i < nbins; i++) {
|
2012-03-07 06:57:45 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-11-01 13:30:49 +08:00
|
|
|
"\t\t\t\t{\n");
|
2010-03-02 12:15:26 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t\"size\": %zu,\n", sv);
|
|
|
|
|
|
|
|
CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
|
|
|
|
|
|
|
|
CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t\"slab_size\": %zu\n", sv);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
|
|
|
|
}
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t],\n");
|
|
|
|
|
|
|
|
CTL_GET("arenas.nlextents", &nlextents, unsigned);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"nlextents\": %u,\n", nlextents);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"lextent\": [\n");
|
|
|
|
for (i = 0; i < nlextents; i++) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t{\n");
|
|
|
|
|
|
|
|
CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t\t\"size\": %zu\n", sv);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : "");
|
2010-02-12 05:19:21 +08:00
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t]\n");
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-12-24 03:15:44 +08:00
|
|
|
"\t\t}%s\n", (config_prof || more) ? "," : "");
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/* prof. */
|
2016-12-24 03:15:44 +08:00
|
|
|
if (config_prof && json) {
|
2016-11-01 13:30:49 +08:00
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\"prof\": {\n");
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
CTL_GET("prof.thread_active_init", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
|
|
|
|
"false");
|
|
|
|
|
|
|
|
CTL_GET("prof.active", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"active\": %s,\n", bv ? "true" : "false");
|
|
|
|
|
|
|
|
CTL_GET("prof.gdump", &bv, bool);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
|
|
|
|
|
|
|
|
CTL_GET("prof.interval", &u64v, uint64_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"interval\": %"FMTu64",\n", u64v);
|
|
|
|
|
|
|
|
CTL_GET("prof.lg_sample", &ssv, ssize_t);
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t\t\t\"lg_sample\": %zd\n", ssv);
|
|
|
|
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
2016-12-24 03:15:44 +08:00
|
|
|
"\t\t}%s\n", more ? "," : "");
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Print the "stats" (global totals) and "stats.arenas" (per-arena) sections.
 * merged/destroyed/unmerged select which arena views to print; bins and
 * large are forwarded to stats_arena_print() to select per-size-class
 * detail.  In JSON mode, trailing commas between sibling sections are
 * emitted only when a later section is known to follow.
 */
static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
    bool json, bool merged, bool destroyed, bool unmerged, bool bins,
    bool large) {
	size_t allocated, active, metadata, resident, mapped, retained;

	/* Global totals from the stats.* mallctl namespace. */
	CTL_GET("stats.allocated", &allocated, size_t);
	CTL_GET("stats.active", &active, size_t);
	CTL_GET("stats.metadata", &metadata, size_t);
	CTL_GET("stats.resident", &resident, size_t);
	CTL_GET("stats.mapped", &mapped, size_t);
	CTL_GET("stats.retained", &retained, size_t);
	if (json) {
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\"stats\": {\n");

		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"allocated\": %zu,\n", allocated);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"active\": %zu,\n", active);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"metadata\": %zu,\n", metadata);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"resident\": %zu,\n", resident);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"mapped\": %zu,\n", mapped);
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t\t\"retained\": %zu\n", retained);

		/* Comma only if a "stats.arenas" section follows. */
		malloc_cprintf(write_cb, cbopaque,
		    "\t\t}%s\n", (merged || unmerged) ? "," : "");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, metadata: %zu,"
		    " resident: %zu, mapped: %zu, retained: %zu\n",
		    allocated, active, metadata, resident, mapped, retained);
	}

	if (merged || destroyed || unmerged) {
		unsigned narenas;

		if (json) {
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t\"stats.arenas\": {\n");
		}

		CTL_GET("arenas.narenas", &narenas, unsigned);
		{
			size_t mib[3];
			size_t miblen = sizeof(mib) / sizeof(size_t);
			size_t sz;
			VARIABLE_ARRAY(bool, initialized, narenas);
			bool destroyed_initialized;
			unsigned i, j, ninitialized;

			/*
			 * Query arena.<i>.initialized for every arena index
			 * via a cached mib, counting initialized arenas.
			 */
			xmallctlnametomib("arena.0.initialized", mib, &miblen);
			for (i = ninitialized = 0; i < narenas; i++) {
				mib[1] = i;
				sz = sizeof(bool);
				xmallctlbymib(mib, miblen, &initialized[i], &sz,
				    NULL, 0);
				if (initialized[i]) {
					ninitialized++;
				}
			}
			/* Same query for the special "destroyed" pseudo-arena. */
			mib[1] = MALLCTL_ARENAS_DESTROYED;
			sz = sizeof(bool);
			xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
			    NULL, 0);

			/* Merged stats. */
			if (merged && (ninitialized > 1 || !unmerged)) {
				/* Print merged arena stats. */
				if (json) {
					malloc_cprintf(write_cb, cbopaque,
					    "\t\t\t\"merged\": {\n");
				} else {
					malloc_cprintf(write_cb, cbopaque,
					    "\nMerged arenas stats:\n");
				}
				stats_arena_print(write_cb, cbopaque, json,
				    MALLCTL_ARENAS_ALL, bins, large);
				if (json) {
					/*
					 * Comma only when a destroyed or
					 * unmerged sibling section follows.
					 */
					malloc_cprintf(write_cb, cbopaque,
					    "\t\t\t}%s\n",
					    ((destroyed_initialized &&
					    destroyed) || unmerged) ? "," :
					    "");
				}
			}

			/* Destroyed stats. */
			if (destroyed_initialized && destroyed) {
				/* Print destroyed arena stats. */
				if (json) {
					malloc_cprintf(write_cb, cbopaque,
					    "\t\t\t\"destroyed\": {\n");
				} else {
					malloc_cprintf(write_cb, cbopaque,
					    "\nDestroyed arenas stats:\n");
				}
				stats_arena_print(write_cb, cbopaque, json,
				    MALLCTL_ARENAS_DESTROYED, bins, large);
				if (json) {
					malloc_cprintf(write_cb, cbopaque,
					    "\t\t\t}%s\n", unmerged ? "," :
					    "");
				}
			}

			/* Unmerged stats. */
			if (unmerged) {
				/* j counts initialized arenas printed so far. */
				for (i = j = 0; i < narenas; i++) {
					if (initialized[i]) {
						if (json) {
							j++;
							malloc_cprintf(write_cb,
							    cbopaque,
							    "\t\t\t\"%u\": {\n",
							    i);
						} else {
							malloc_cprintf(write_cb,
							    cbopaque,
							    "\narenas[%u]:\n",
							    i);
						}
						stats_arena_print(write_cb,
						    cbopaque, json, i, bins,
						    large);
						if (json) {
							malloc_cprintf(write_cb,
							    cbopaque,
							    "\t\t\t}%s\n", (j <
							    ninitialized) ? ","
							    : "");
						}
					}
				}
			}
		}

		if (json) {
			malloc_cprintf(write_cb, cbopaque,
			    "\t\t}\n");
		}
	}
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
void
|
|
|
|
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
2017-01-16 08:56:30 +08:00
|
|
|
const char *opts) {
|
2016-11-01 13:30:49 +08:00
|
|
|
int err;
|
|
|
|
uint64_t epoch;
|
|
|
|
size_t u64sz;
|
|
|
|
bool json = false;
|
|
|
|
bool general = true;
|
2016-12-24 03:15:44 +08:00
|
|
|
bool merged = config_stats;
|
2017-01-04 09:21:59 +08:00
|
|
|
bool destroyed = config_stats;
|
2016-12-24 03:15:44 +08:00
|
|
|
bool unmerged = config_stats;
|
2016-11-01 13:30:49 +08:00
|
|
|
bool bins = true;
|
|
|
|
bool large = true;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
/*
|
|
|
|
* Refresh stats, in case mallctl() was called by the application.
|
|
|
|
*
|
|
|
|
* Check for OOM here, since refreshing the ctl cache can trigger
|
|
|
|
* allocation. In practice, none of the subsequent mallctl()-related
|
|
|
|
* calls in this function will cause OOM if this one succeeds.
|
|
|
|
* */
|
|
|
|
epoch = 1;
|
|
|
|
u64sz = sizeof(uint64_t);
|
2016-11-16 07:01:03 +08:00
|
|
|
err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
|
|
|
|
sizeof(uint64_t));
|
2016-11-01 13:30:49 +08:00
|
|
|
if (err != 0) {
|
|
|
|
if (err == EAGAIN) {
|
|
|
|
malloc_write("<jemalloc>: Memory allocation failure in "
|
|
|
|
"mallctl(\"epoch\", ...)\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
|
|
|
|
"...)\n");
|
|
|
|
abort();
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
if (opts != NULL) {
|
|
|
|
unsigned i;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-11-01 13:30:49 +08:00
|
|
|
for (i = 0; opts[i] != '\0'; i++) {
|
|
|
|
switch (opts[i]) {
|
|
|
|
case 'J':
|
|
|
|
json = true;
|
|
|
|
break;
|
|
|
|
case 'g':
|
|
|
|
general = false;
|
|
|
|
break;
|
|
|
|
case 'm':
|
|
|
|
merged = false;
|
|
|
|
break;
|
2017-01-04 09:21:59 +08:00
|
|
|
case 'd':
|
|
|
|
destroyed = false;
|
|
|
|
break;
|
2016-11-01 13:30:49 +08:00
|
|
|
case 'a':
|
|
|
|
unmerged = false;
|
|
|
|
break;
|
|
|
|
case 'b':
|
|
|
|
bins = false;
|
|
|
|
break;
|
|
|
|
case 'l':
|
|
|
|
large = false;
|
|
|
|
break;
|
|
|
|
default:;
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"{\n"
|
|
|
|
"\t\"jemalloc\": {\n");
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"___ Begin jemalloc statistics ___\n");
|
|
|
|
}
|
|
|
|
|
2016-12-24 03:15:44 +08:00
|
|
|
if (general) {
|
|
|
|
bool more = (merged || unmerged);
|
|
|
|
stats_general_print(write_cb, cbopaque, json, more);
|
|
|
|
}
|
2016-11-01 13:30:49 +08:00
|
|
|
if (config_stats) {
|
2017-01-04 09:21:59 +08:00
|
|
|
stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
|
|
|
|
unmerged, bins, large);
|
2016-11-01 13:30:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (json) {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"\t}\n"
|
|
|
|
"}\n");
|
|
|
|
} else {
|
|
|
|
malloc_cprintf(write_cb, cbopaque,
|
|
|
|
"--- End jemalloc statistics ---\n");
|
|
|
|
}
|
2010-01-17 01:53:50 +08:00
|
|
|
}
|