Add small run utilization to stats output.

Add the 'util' column, which reports the proportion of available regions
that are currently in use for each small size class.  Small run
utilization is the complement of external fragmentation.  For example,
utilization of 0.75 indicates that 25% of small run memory is consumed
by external fragmentation; put another way, the unused quarter amounts
to 33% external fragmentation overhead relative to the memory actually
in use (0.25/0.75).

This resolves #27.
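As a quick standalone illustration of the arithmetic above (this sketch
is not part of the commit; the values are the 0.75 example from the
message):

#include <stdio.h>

int
main(void)
{
	double util = 0.75;		/* curregs / (nregs * curruns) */
	double frag = 1.0 - util;	/* fraction lost to external fragmentation */
	double overhead = frag / util;	/* overhead relative to live regions */

	/* Prints: util 0.75 -> 25% fragmented, 33% overhead. */
	printf("util %.2f -> %.0f%% fragmented, %.0f%% overhead\n",
	    util, frag * 100.0, overhead * 100.0);
	return (0);
}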
commit bf8d6a1092 (parent acbcbad1e1)
Author: Jason Evans
Date:   2014-10-15 16:18:42 -07:00


@@ -69,14 +69,14 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	if (config_tcache) {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: size ind allocated nmalloc"
-		    " ndalloc nrequests curregs regs pgs"
-		    " nfills nflushes newruns reruns"
-		    " curruns\n");
+		    " ndalloc nrequests curregs curruns regs"
+		    " pgs util nfills nflushes newruns"
+		    " reruns\n");
 	} else {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: size ind allocated nmalloc"
-		    " ndalloc nrequests curregs regs pgs"
-		    " newruns reruns curruns\n");
+		    " ndalloc nrequests curregs curruns regs"
+		    " pgs util newruns reruns\n");
 	}
 	CTL_GET("arenas.nbins", &nbins, unsigned);
 	for (j = 0, in_gap = false; j < nbins; j++) {
@@ -86,11 +86,12 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		if (nruns == 0)
 			in_gap = true;
 		else {
-			size_t reg_size, run_size, curregs;
+			size_t reg_size, run_size, curregs, availregs, milli;
+			size_t curruns;
 			uint32_t nregs;
 			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
 			uint64_t reruns;
-			size_t curruns;
+			char util[6]; /* "x.yyy". */

 			if (in_gap) {
 				malloc_cprintf(write_cb, cbopaque,
@@ -118,24 +119,41 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			    uint64_t);
 			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
 			    size_t);
+
+			availregs = nregs * curruns;
+			milli = (availregs != 0) ? (1000 * curregs) / availregs
+			    : 1000;
+			assert(milli <= 1000);
+			if (milli < 10) {
+				malloc_snprintf(util, sizeof(util), "0.00%zu",
+				    milli);
+			} else if (milli < 100) {
+				malloc_snprintf(util, sizeof(util), "0.0%zu",
+				    milli);
+			} else if (milli < 1000) {
+				malloc_snprintf(util, sizeof(util), "0.%zu",
+				    milli);
+			} else
+				malloc_snprintf(util, sizeof(util), "1");
+
 			if (config_tcache) {
 				malloc_cprintf(write_cb, cbopaque,
 				    "%20zu %3u %12zu %12"PRIu64" %12"PRIu64
-				    " %12"PRIu64" %12zu %4u %3zu %12"PRIu64
+				    " %12"PRIu64" %12zu %12zu %4u %3zu %-5s"
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
-				    " %12zu\n",
+				    " %12"PRIu64"\n",
 				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, nregs, run_size
-				    / page, nfills, nflushes, nruns, reruns,
-				    curruns);
+				    ndalloc, nrequests, curregs, curruns, nregs,
+				    run_size / page, util, nfills, nflushes,
+				    nruns, reruns);
 			} else {
 				malloc_cprintf(write_cb, cbopaque,
 				    "%20zu %3u %12zu %12"PRIu64" %12"PRIu64
-				    " %12"PRIu64" %12zu %4u %3zu %12"PRIu64
-				    " %12"PRIu64" %12zu\n",
+				    " %12"PRIu64" %12zu %12zu %4u %3zu %-5s"
+				    " %12"PRIu64" %12"PRIu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, nregs,
-				    run_size / page, nruns, reruns, curruns);
+				    ndalloc, nrequests, curregs, curruns, nregs,
+				    run_size / page, util, nruns, reruns);
 			}
 		}
 	}
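For reference, the new util string is computed in fixed-point
milli-units, presumably to keep floating point out of the stats path.
The following standalone sketch reproduces that logic, with standard
snprintf() standing in for jemalloc's internal malloc_snprintf() and
arbitrary demo values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void
util_str(char *util, size_t len, size_t curregs, uint32_t nregs,
    size_t curruns)
{
	size_t availregs = nregs * curruns;	/* regions backed by live runs */
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	assert(milli <= 1000);
	/* Zero-pad so the result reads as "x.yyy" ("1" at full utilization). */
	if (milli < 10)
		snprintf(util, len, "0.00%zu", milli);
	else if (milli < 100)
		snprintf(util, len, "0.0%zu", milli);
	else if (milli < 1000)
		snprintf(util, len, "0.%zu", milli);
	else
		snprintf(util, len, "1");
}

int
main(void)
{
	char util[6];	/* "x.yyy" plus NUL. */

	/* 75 live regions out of 4 regions/run * 25 runs = 100 available. */
	util_str(util, sizeof(util), 75, 4, 25);
	printf("util = %s\n", util);	/* Prints "util = 0.750". */
	return (0);
}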