Make interval-triggered profile dumping optional.

Make it possible to disable interval-triggered profile dumping, even if
profiling is enabled.  This is useful if the user only wants a single
dump at exit, or if the application manually triggers profile dumps.
This commit is contained in:
Jason Evans 2010-03-31 17:35:51 -07:00
parent 0b270a991d
commit a02fc08ec9
6 changed files with 24 additions and 14 deletions

View File

@@ -38,7 +38,7 @@
.\" @(#)malloc.3 8.1 (Berkeley) 6/4/93
.\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
.\"
.Dd March 17, 2010
.Dd March 31, 2010
.Dt JEMALLOC 3
.Os
.Sh NAME
@@ -401,7 +401,9 @@ will disable dirty page purging.
@roff_prof@is controlled by the
@roff_prof@JEMALLOC_PROF_PREFIX
@roff_prof@environment variable.
@roff_prof@The default average interval is 1 GiB.
@roff_prof@The default average interval is 1 GiB;
@roff_prof@.Ev JEMALLOC_OPTIONS=31i
@roff_prof@will disable interval-triggered profile dumping.
@roff_fill@.It J
@roff_fill@Each byte of new memory allocated by
@roff_fill@.Fn @jemalloc_prefix@malloc
@@ -794,7 +796,7 @@ option.
@roff_prof@option.
@roff_prof@.Ed
.\"-----------------------------------------------------------------------------
@roff_prof@.It Sy "opt.lg_prof_interval (size_t) r-"
@roff_prof@.It Sy "opt.lg_prof_interval (ssize_t) r-"
@roff_prof@.Bd -ragged -offset indent -compact
@roff_prof@See the
@roff_prof@.Dq I

View File

@@ -121,7 +121,7 @@ struct prof_ctx_s {
extern bool opt_prof;
extern size_t opt_lg_prof_bt_max; /* Maximum backtrace depth. */
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern size_t opt_lg_prof_interval; /* lg(prof_interval). */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_udump; /* High-water memory dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */

View File

@@ -1153,7 +1153,7 @@ CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
CTL_RO_GEN(opt_prof, opt_prof, bool)
CTL_RO_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, size_t)
CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_GEN(opt_prof_udump, opt_prof_udump, bool)
CTL_RO_GEN(opt_prof_leak, opt_prof_leak, bool)
#endif

View File

@@ -486,7 +486,7 @@ MALLOC_OUT:
#endif
#ifdef JEMALLOC_PROF
case 'i':
if (opt_lg_prof_interval > 0)
if (opt_lg_prof_interval >= 0)
opt_lg_prof_interval--;
break;
case 'I':

View File

@@ -20,7 +20,7 @@
bool opt_prof = false;
size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
size_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_udump = false;
bool opt_prof_leak = false;
@@ -1271,8 +1271,13 @@ prof_boot0(void)
opt_prof = true;
opt_prof_udump = false;
prof_interval = 0;
} else if (opt_prof)
prof_interval = (((uint64_t)1U) << opt_lg_prof_interval);
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
} else
prof_interval = 0;
}
prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
}

View File

@@ -582,12 +582,15 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
write_cb(cbopaque, umax2s(sv, 10, s));
write_cb(cbopaque, ")\n");
CTL_GET("opt.lg_prof_interval", &sv, size_t);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
write_cb(cbopaque, "Average profile dump interval: ");
write_cb(cbopaque, umax2s((1U << sv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, umax2s(sv, 10, s));
write_cb(cbopaque, ")\n");
if (ssv >= 0) {
write_cb(cbopaque, umax2s((1U << ssv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, umax2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
}
CTL_GET("arenas.chunksize", &sv, size_t);
write_cb(cbopaque, "Chunk size: ");