From a02fc08ec9dd8479a6430155b6a433da09f6ff10 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Wed, 31 Mar 2010 17:35:51 -0700
Subject: [PATCH] Make interval-triggered profile dumping optional.

Make it possible to disable interval-triggered profile dumping, even if
profiling is enabled.  This is useful if the user only wants a single dump
at exit, or if the application manually triggers profile dumps.
---
 jemalloc/doc/jemalloc.3.in                |  8 +++++---
 jemalloc/include/jemalloc/internal/prof.h |  2 +-
 jemalloc/src/ctl.c                        |  2 +-
 jemalloc/src/jemalloc.c                   |  2 +-
 jemalloc/src/prof.c                       | 11 ++++++++---
 jemalloc/src/stats.c                      | 13 ++++++++-----
 6 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/jemalloc/doc/jemalloc.3.in b/jemalloc/doc/jemalloc.3.in
index 1ea93bf5..13e616ab 100644
--- a/jemalloc/doc/jemalloc.3.in
+++ b/jemalloc/doc/jemalloc.3.in
@@ -38,7 +38,7 @@
 .\" @(#)malloc.3 8.1 (Berkeley) 6/4/93
 .\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
 .\"
-.Dd March 17, 2010
+.Dd March 31, 2010
 .Dt JEMALLOC 3
 .Os
 .Sh NAME
@@ -401,7 +401,9 @@ will disable dirty page purging.
 @roff_prof@is controlled by the
 @roff_prof@JEMALLOC_PROF_PREFIX
 @roff_prof@environment variable.
-@roff_prof@The default average interval is 1 GiB.
+@roff_prof@The default average interval is 1 GiB;
+@roff_prof@.Ev JEMALLOC_OPTIONS=31i
+@roff_prof@will disable interval-triggered profile dumping.
 @roff_fill@.It J
 @roff_fill@Each byte of new memory allocated by
 @roff_fill@.Fn @jemalloc_prefix@malloc
@@ -794,7 +796,7 @@ option.
 @roff_prof@option.
 @roff_prof@.Ed
 .\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.lg_prof_interval (size_t) r-"
+@roff_prof@.It Sy "opt.lg_prof_interval (ssize_t) r-"
 @roff_prof@.Bd -ragged -offset indent -compact
 @roff_prof@See the
 @roff_prof@.Dq I
diff --git a/jemalloc/include/jemalloc/internal/prof.h b/jemalloc/include/jemalloc/internal/prof.h
index 0a5db297..2a0e539b 100644
--- a/jemalloc/include/jemalloc/internal/prof.h
+++ b/jemalloc/include/jemalloc/internal/prof.h
@@ -121,7 +121,7 @@ struct prof_ctx_s {
 extern bool	opt_prof;
 extern size_t	opt_lg_prof_bt_max;	/* Maximum backtrace depth. */
 extern size_t	opt_lg_prof_sample;	/* Mean bytes between samples. */
-extern size_t	opt_lg_prof_interval;	/* lg(prof_interval). */
+extern ssize_t	opt_lg_prof_interval;	/* lg(prof_interval). */
 extern bool	opt_prof_udump;		/* High-water memory dumping. */
 extern bool	opt_prof_leak;		/* Dump leak summary at exit. */
 
diff --git a/jemalloc/src/ctl.c b/jemalloc/src/ctl.c
index 2249102f..f628c134 100644
--- a/jemalloc/src/ctl.c
+++ b/jemalloc/src/ctl.c
@@ -1153,7 +1153,7 @@ CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
 CTL_RO_GEN(opt_prof, opt_prof, bool)
 CTL_RO_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
 CTL_RO_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, size_t)
+CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
 CTL_RO_GEN(opt_prof_udump, opt_prof_udump, bool)
 CTL_RO_GEN(opt_prof_leak, opt_prof_leak, bool)
 #endif
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index b30f2313..d3c7cca1 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -486,7 +486,7 @@ MALLOC_OUT:
 #endif
 #ifdef JEMALLOC_PROF
 			case 'i':
-				if (opt_lg_prof_interval > 0)
+				if (opt_lg_prof_interval >= 0)
 					opt_lg_prof_interval--;
 				break;
 			case 'I':
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 80c81dac..97db422b 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -20,7 +20,7 @@
 bool	opt_prof = false;
 size_t	opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
 size_t	opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-size_t	opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+ssize_t	opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool	opt_prof_udump = false;
 bool	opt_prof_leak = false;
 
@@ -1271,8 +1271,13 @@ prof_boot0(void)
 		opt_prof = true;
 		opt_prof_udump = false;
 		prof_interval = 0;
-	} else if (opt_prof)
-		prof_interval = (((uint64_t)1U) << opt_lg_prof_interval);
+	} else if (opt_prof) {
+		if (opt_lg_prof_interval >= 0) {
+			prof_interval = (((uint64_t)1U) <<
+			    opt_lg_prof_interval);
+		} else
+			prof_interval = 0;
+	}
 
 	prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
 }
diff --git a/jemalloc/src/stats.c b/jemalloc/src/stats.c
index a5ec1f1f..60e75bc5 100644
--- a/jemalloc/src/stats.c
+++ b/jemalloc/src/stats.c
@@ -582,12 +582,15 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		write_cb(cbopaque, umax2s(sv, 10, s));
 		write_cb(cbopaque, ")\n");
 
-		CTL_GET("opt.lg_prof_interval", &sv, size_t);
+		CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
 		write_cb(cbopaque, "Average profile dump interval: ");
-		write_cb(cbopaque, umax2s((1U << sv), 10, s));
-		write_cb(cbopaque, " (2^");
-		write_cb(cbopaque, umax2s(sv, 10, s));
-		write_cb(cbopaque, ")\n");
+		if (ssv >= 0) {
+			write_cb(cbopaque, umax2s((1U << ssv), 10, s));
+			write_cb(cbopaque, " (2^");
+			write_cb(cbopaque, umax2s(ssv, 10, s));
+			write_cb(cbopaque, ")\n");
+		} else
+			write_cb(cbopaque, "N/A\n");
 	}
 	CTL_GET("arenas.chunksize", &sv, size_t);
 	write_cb(cbopaque, "Chunk size: ");
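
A note for readers, separate from the patch itself: the commit message names
two alternatives to interval-triggered dumping -- a single dump at exit, and
dumps triggered manually by the application.  The sketch below illustrates
the manual-trigger style under stated assumptions rather than showing code
from this change: it assumes a library built with profiling enabled, an
unprefixed public mallctl() symbol (builds may prefix public symbols via
JEMALLOC_P()), and the "prof.dump" control documented in jemalloc.3.

	/*
	 * Run with interval-triggered dumping disabled; per the man page
	 * hunk above, 31 'i' repetitions walk the default
	 * opt_lg_prof_interval (30, i.e. 1 GiB) down to -1:
	 *
	 *   JEMALLOC_OPTIONS=31i ./app
	 */
	#include <stdio.h>
	#include <stdlib.h>

	/* Assumed prototype of the public control interface. */
	int	mallctl(const char *name, void *oldp, size_t *oldlenp,
	    void *newp, size_t newlen);

	int
	main(void)
	{
		void *p = malloc(1 << 20);	/* Something to profile. */

		/*
		 * Manually trigger one profile dump; passing no new value
		 * lets jemalloc choose the output filename.
		 */
		if (mallctl("prof.dump", NULL, NULL, NULL, 0) != 0)
			fprintf(stderr, "prof.dump failed\n");

		free(p);
		return (0);
	}

With interval dumps disabled this way, the application alone decides when
profiles are written, which is exactly the use case the commit message
describes.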