Replace --disable-munmap with opt.munmap.
Control use of munmap(2) via a run-time option rather than a compile-time option (with the same per-platform default). The old behavior of --disable-munmap can be achieved with --with-malloc-conf=munmap:false. This partially resolves #580.
This commit is contained in:
parent e2cc6280ed
commit c67c3e4a63
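For context on how the replacement option is used: the new run-time setting flows through jemalloc's usual malloc_conf channels (for example the MALLOC_CONF environment variable, as in MALLOC_CONF=munmap:false, or baked into the binary at configure time via --with-malloc-conf=munmap:false as noted above), and it can be read back through the mallctl interface. A minimal sketch of reading it, not part of this diff, assuming the standard <jemalloc/jemalloc.h> header and mallctl() API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        bool munmap_enabled;
        size_t sz = sizeof(munmap_enabled);

        /* "opt.munmap" is the run-time option introduced by this commit. */
        if (mallctl("opt.munmap", &munmap_enabled, &sz, NULL, 0) != 0) {
            fprintf(stderr, "mallctl(\"opt.munmap\") failed\n");
            return 1;
        }
        printf("opt.munmap: %s\n", munmap_enabled ? "true" : "false");
        return 0;
    }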
INSTALL (9 changed lines)
@@ -128,15 +128,6 @@ any of the following arguments (not a definitive list) to 'configure':
 Statically link against the specified libunwind.a rather than dynamically
 linking with -lunwind.
 
---disable-munmap
-Disable virtual memory deallocation via munmap(2); instead keep track of
-the virtual memory for later use. munmap() is disabled by default (i.e.
---disable-munmap is implied) on [64-bit] Linux, which has a quirk in its
-virtual memory allocation algorithm that causes semi-permanent VM map holes
-under normal jemalloc operation. Although munmap() causes issues on 32-bit
-Linux as well, it is not disabled by default due to the practical
-possibility of address space exhaustion.
-
 --disable-fill
 Disable support for junk/zero filling of memory. See the "opt.junk" and
 "opt.zero" option documentation for usage details.
configure.ac (16 changed lines)
@@ -1086,21 +1086,10 @@ if test "x${maps_coalesce}" = "x1" ; then
 AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
 fi
 
-dnl Enable VM deallocation via munmap() by default.
-AC_ARG_ENABLE([munmap],
-[AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
-[if test "x$enable_munmap" = "xno" ; then
-enable_munmap="0"
-else
-enable_munmap="1"
-fi
-],
-[enable_munmap="${default_munmap}"]
-)
-if test "x$enable_munmap" = "x1" ; then
+dnl Indicate whether to use munmap() by default.
+if test "x$default_munmap" = "x1" ; then
 AC_DEFINE([JEMALLOC_MUNMAP], [ ])
 fi
-AC_SUBST([enable_munmap])
 
 dnl Enable allocation from DSS if supported by the OS.
 have_dss="1"
@@ -2076,7 +2065,6 @@ AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
 AC_MSG_RESULT([fill : ${enable_fill}])
 AC_MSG_RESULT([utrace : ${enable_utrace}])
 AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
-AC_MSG_RESULT([munmap : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
 AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
 AC_MSG_RESULT([cxx : ${enable_cxx}])
@@ -788,16 +788,6 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
 during build configuration.</para></listitem>
 </varlistentry>
 
-<varlistentry id="config.munmap">
-<term>
-<mallctl>config.munmap</mallctl>
-(<type>bool</type>)
-<literal>r-</literal>
-</term>
-<listitem><para><option>--enable-munmap</option> was specified during
-build configuration.</para></listitem>
-</varlistentry>
-
 <varlistentry id="config.prof">
 <term>
 <mallctl>config.prof</mallctl>
@@ -873,6 +863,28 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="opt.munmap">
+<term>
+<mallctl>opt.munmap</mallctl>
+(<type>bool</type>)
+<literal>r-</literal>
+</term>
+<listitem><para>If true, call
+<citerefentry><refentrytitle>munmap</refentrytitle>
+<manvolnum>2</manvolnum></citerefentry> or equivalent rather than
+retaining unused virtual memory (see <link
+linkend="stats.retained">stats.retained</link> for related details).
+This option is enabled by default unless it is known to trigger
+platform-specific performance problems, e.g. for [64-bit] Linux, which
+has a quirk in its virtual memory allocation algorithm that causes
+semi-permanent VM map holes under normal jemalloc operation. Although
+<citerefentry><refentrytitle>munmap</refentrytitle>
+<manvolnum>2</manvolnum></citerefentry> causes issues on 32-bit Linux as
+well, it is not disabled by default due to the practical possibility of
+address space exhaustion.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="opt.dss">
 <term>
 <mallctl>opt.dss</mallctl>
@@ -2114,9 +2126,9 @@ struct extent_hooks_s {
 <listitem><para>Total number of bytes in virtual memory mappings that
 were retained rather than being returned to the operating system via
 e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry>. Retained virtual memory is
-typically untouched, decommitted, or purged, so it has no strongly
-associated physical memory (see <link
+<manvolnum>2</manvolnum></citerefentry> or similar. Retained virtual
+memory is typically untouched, decommitted, or purged, so it has no
+strongly associated physical memory (see <link
 linkend="arena.i.extent_hooks">extent hooks</link> for details).
 Retained memory is excluded from mapped memory statistics, e.g. <link
 linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
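As a usage aside (not part of this diff): both opt.munmap and the stats.retained counter described above are readable through mallctl(). A hedged sketch, assuming a build with statistics enabled (the default) and the standard <jemalloc/jemalloc.h> API; the "epoch" write refreshes jemalloc's cached statistics before reading:

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        size_t retained;

        /* Refresh cached statistics so stats.retained is current. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        sz = sizeof(retained);
        if (mallctl("stats.retained", &retained, &sz, NULL, 0) == 0) {
            printf("stats.retained: %zu bytes\n", retained);
        }
        return 0;
    }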
@@ -230,10 +230,10 @@ struct arena_s {
 
 /*
 * Next extent size class in a growing series to use when satisfying a
-* request via the extent hooks (only if !config_munmap). This limits
-* the number of disjoint virtual memory ranges so that extent merging
-* can be effective even if multiple arenas' extent allocation requests
-* are highly interleaved.
+* request via the extent hooks (only if !opt_munmap). This limits the
+* number of disjoint virtual memory ranges so that extent merging can
+* be effective even if multiple arenas' extent allocation requests are
+* highly interleaved.
 *
 * Synchronization: atomic.
 */
@@ -1,6 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
 #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
 
+extern bool opt_munmap;
+
 void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
 bool *zero, bool *commit);
 bool extent_dalloc_mmap(void *addr, size_t size);
@@ -192,9 +192,10 @@
 #undef JEMALLOC_MAPS_COALESCE
 
 /*
-* If defined, use munmap() to unmap freed extents, rather than storing them for
-* later reuse. This is disabled by default on Linux because common sequences
-* of mmap()/munmap() calls will cause virtual memory map holes.
+* If defined, use munmap() to unmap freed extents by default, rather than
+* storing them for later reuse. This is disabled on 64-bit Linux because
+* common sequences of mmap()/munmap() calls will cause virtual memory map
+* holes.
 */
 #undef JEMALLOC_MUNMAP
 
@@ -98,13 +98,6 @@ static const bool maps_coalesce =
 false
 #endif
 ;
-static const bool config_munmap =
-#ifdef JEMALLOC_MUNMAP
-true
-#else
-false
-#endif
-;
 static const bool config_stats =
 #ifdef JEMALLOC_STATS
 true
@@ -1143,7 +1143,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 * opportunity to unmap all retained memory without having to keep its
 * own metadata structures, but if deallocation fails, that is the
 * application's decision/problem. In practice, retained extents are
-* leaked here if !config_munmap unless the application provided custom
+* leaked here if !opt_munmap unless the application provided custom
 * extent hooks, so best practice is to either enable munmap (and avoid
 * dss for arenas to be destroyed), or provide custom extent hooks that
 * either unmap retained extents or track them for later use.
@@ -1947,7 +1947,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 goto label_error;
 }
 
-if (!config_munmap) {
+if (!opt_munmap) {
 atomic_store_u(&arena->extent_grow_next, psz2ind(HUGEPAGE),
 ATOMIC_RELAXED);
 }
@@ -66,7 +66,6 @@ CTL_PROTO(config_debug)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
 CTL_PROTO(config_malloc_conf)
-CTL_PROTO(config_munmap)
 CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
 CTL_PROTO(config_prof_libunwind)
@@ -74,6 +73,7 @@ CTL_PROTO(config_stats)
 CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
+CTL_PROTO(opt_munmap)
 CTL_PROTO(opt_dss)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_percpu_arena)
@@ -249,7 +249,6 @@ static const ctl_named_node_t config_node[] = {
 {NAME("fill"), CTL(config_fill)},
 {NAME("lazy_lock"), CTL(config_lazy_lock)},
 {NAME("malloc_conf"), CTL(config_malloc_conf)},
-{NAME("munmap"), CTL(config_munmap)},
 {NAME("prof"), CTL(config_prof)},
 {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
 {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
@@ -260,6 +259,7 @@ static const ctl_named_node_t config_node[] = {
 
 static const ctl_named_node_t opt_node[] = {
 {NAME("abort"), CTL(opt_abort)},
+{NAME("munmap"), CTL(opt_munmap)},
 {NAME("dss"), CTL(opt_dss)},
 {NAME("narenas"), CTL(opt_narenas)},
 {NAME("percpu_arena"), CTL(opt_percpu_arena)},
@@ -1444,7 +1444,6 @@ CTL_RO_CONFIG_GEN(config_debug, bool)
 CTL_RO_CONFIG_GEN(config_fill, bool)
 CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
 CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_munmap, bool)
 CTL_RO_CONFIG_GEN(config_prof, bool)
 CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
 CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
@@ -1455,6 +1454,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 /******************************************************************************/
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_munmap, opt_munmap, bool)
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
 CTL_RO_NL_GEN(opt_percpu_arena, opt_percpu_arena, const char *)
@@ -1123,7 +1123,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
 extent_gdump_add(tsdn, extent);
 }
 }
-if (!config_munmap && extent == NULL) {
+if (!opt_munmap && extent == NULL) {
 extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
 new_addr, size, pad, alignment, slab, szind, zero, commit);
 }
@@ -4,6 +4,17 @@
 
 #include "jemalloc/internal/assert.h"
 
+/******************************************************************************/
+/* Data. */
+
+bool opt_munmap =
+#ifdef JEMALLOC_MUNMAP
+true
+#else
+false
+#endif
+;
+
 /******************************************************************************/
 
 void *
@@ -23,8 +34,8 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
 
 bool
 extent_dalloc_mmap(void *addr, size_t size) {
-if (config_munmap) {
+if (opt_munmap) {
 pages_unmap(addr, size);
 }
-return !config_munmap;
+return !opt_munmap;
 }
@@ -947,7 +947,7 @@ malloc_conf_init(void) {
 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
 #define CONF_MATCH_VALUE(n) \
 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
-#define CONF_HANDLE_BOOL(o, n, cont) \
+#define CONF_HANDLE_BOOL(o, n) \
 if (CONF_MATCH(n)) { \
 if (CONF_MATCH_VALUE("true")) { \
 o = true; \
@@ -958,9 +958,7 @@ malloc_conf_init(void) {
 "Invalid conf value", \
 k, klen, v, vlen); \
 } \
-if (cont) { \
 continue; \
-} \
 }
 #define CONF_MIN_no(um, min) false
 #define CONF_MIN_yes(um, min) ((um) < (min))
@@ -1043,7 +1041,8 @@ malloc_conf_init(void) {
 continue; \
 }
 
-CONF_HANDLE_BOOL(opt_abort, "abort", true)
+CONF_HANDLE_BOOL(opt_abort, "abort")
+CONF_HANDLE_BOOL(opt_munmap, "munmap")
 if (strncmp("dss", k, klen) == 0) {
 int i;
 bool match = false;
@@ -1074,7 +1073,7 @@ malloc_conf_init(void) {
 "dirty_decay_time", -1, NSTIME_SEC_MAX);
 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_time,
 "muzzy_decay_time", -1, NSTIME_SEC_MAX);
-CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
+CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
 if (config_fill) {
 if (CONF_MATCH("junk")) {
 if (CONF_MATCH_VALUE("true")) {
@@ -1100,15 +1099,15 @@ malloc_conf_init(void) {
 }
 continue;
 }
-CONF_HANDLE_BOOL(opt_zero, "zero", true)
+CONF_HANDLE_BOOL(opt_zero, "zero")
 }
 if (config_utrace) {
-CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
+CONF_HANDLE_BOOL(opt_utrace, "utrace")
 }
 if (config_xmalloc) {
-CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
+CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
 }
-CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
+CONF_HANDLE_BOOL(opt_tcache, "tcache")
 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
 -1, (sizeof(size_t) << 3) - 1)
 if (strncmp("percpu_arena", k, klen) == 0) {
@@ -1136,27 +1135,22 @@ malloc_conf_init(void) {
 continue;
 }
 if (config_prof) {
-CONF_HANDLE_BOOL(opt_prof, "prof", true)
+CONF_HANDLE_BOOL(opt_prof, "prof")
 CONF_HANDLE_CHAR_P(opt_prof_prefix,
 "prof_prefix", "jeprof")
-CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
-true)
+CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
-"prof_thread_active_init", true)
+"prof_thread_active_init")
 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
 - 1, no, yes, true)
-CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
-true)
+CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
 "lg_prof_interval", -1,
 (sizeof(uint64_t) << 3) - 1)
-CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
-true)
-CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
-true)
-CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
-true)
+CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
+CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
+CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
 }
 malloc_conf_error("Invalid conf pair", k, klen, v,
 vlen);
@@ -93,7 +93,7 @@ large_dalloc_maybe_junk(void *ptr, size_t size) {
 * Only bother junk filling if the extent isn't about to be
 * unmapped.
 */
-if (!config_munmap || (have_dss && extent_in_dss(ptr))) {
+if (!opt_munmap || (have_dss && extent_in_dss(ptr))) {
 large_dalloc_junk(ptr, size);
 }
 }
@@ -707,7 +707,6 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 "config.malloc_conf: \"%s\"\n", config_malloc_conf);
 }
 
-CONFIG_WRITE_BOOL_JSON(munmap, ",")
 CONFIG_WRITE_BOOL_JSON(prof, ",")
 CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
 CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
@@ -801,6 +800,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 "Run-time option settings:\n");
 }
 OPT_WRITE_BOOL(abort, ",")
+OPT_WRITE_BOOL(munmap, ",")
 OPT_WRITE_CHAR_P(dss, ",")
 OPT_WRITE_UNSIGNED(narenas, ",")
 OPT_WRITE_CHAR_P(percpu_arena, ",")
@@ -251,8 +251,8 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
 TEST_END
 
 /*
-* Actually unmap extents, regardless of config_munmap, so that attempts to
-* access a destroyed arena's memory will segfault.
+* Actually unmap extents, regardless of opt_munmap, so that attempts to access
+* a destroyed arena's memory will segfault.
 */
 static bool
 extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
@@ -131,7 +131,6 @@ TEST_BEGIN(test_mallctl_config) {
 TEST_MALLCTL_CONFIG(fill, bool);
 TEST_MALLCTL_CONFIG(lazy_lock, bool);
 TEST_MALLCTL_CONFIG(malloc_conf, const char *);
-TEST_MALLCTL_CONFIG(munmap, bool);
 TEST_MALLCTL_CONFIG(prof, bool);
 TEST_MALLCTL_CONFIG(prof_libgcc, bool);
 TEST_MALLCTL_CONFIG(prof_libunwind, bool);
@@ -158,6 +157,7 @@ TEST_BEGIN(test_mallctl_opt) {
 } while (0)
 
 TEST_MALLCTL_OPT(bool, abort, always);
+TEST_MALLCTL_OPT(bool, munmap, always);
 TEST_MALLCTL_OPT(const char *, dss, always);
 TEST_MALLCTL_OPT(unsigned, narenas, always);
 TEST_MALLCTL_OPT(const char *, percpu_arena, always);