Refactor !opt.munmap to opt.retain.

Jason Evans 2017-04-26 16:26:12 -07:00
parent d901a37775
commit b9ab04a191
16 changed files with 42 additions and 43 deletions

View File

@@ -517,7 +517,7 @@ dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
default_munmap="1"
default_retain="0"
maps_coalesce="1"
case "${host}" in
*-*-darwin* | *-*-ios*)
@@ -557,7 +557,7 @@ case "${host}" in
AC_DEFINE([JEMALLOC_C11_ATOMICS])
force_tls="0"
if test "${LG_SIZEOF_PTR}" = "3"; then
default_munmap="0"
default_retain="1"
fi
;;
*-*-linux* | *-*-kfreebsd*)
@@ -570,7 +570,7 @@ case "${host}" in
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
if test "${LG_SIZEOF_PTR}" = "3"; then
default_munmap="0"
default_retain="1"
fi
;;
*-*-netbsd*)
@@ -1086,9 +1086,9 @@ if test "x${maps_coalesce}" = "x1" ; then
AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
fi
dnl Indicate whether to use munmap() by default.
if test "x$default_munmap" = "x1" ; then
AC_DEFINE([JEMALLOC_MUNMAP], [ ])
dnl Indicate whether to retain memory (rather than using munmap()) by default.
if test "x$default_retain" = "x1" ; then
AC_DEFINE([JEMALLOC_RETAIN], [ ])
fi
dnl Enable allocation from DSS if supported by the OS.

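Illustrative aside, not part of the diff: the configure-time default chosen above can be overridden at run time through jemalloc's standard option mechanism, either via the MALLOC_CONF environment variable or, as sketched below, via the application-provided malloc_conf symbol that jemalloc consults during initialization.

/* Hedged sketch: force munmap()-style discarding even on a platform
 * where the retain default is true. */
const char *malloc_conf = "retain:false";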
View File

@@ -863,25 +863,26 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
</para></listitem>
</varlistentry>
<varlistentry id="opt.munmap">
<varlistentry id="opt.retain">
<term>
<mallctl>opt.munmap</mallctl>
<mallctl>opt.retain</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>If true, call
<listitem><para>If true, retain unused virtual memory for later reuse
rather than discarding it by calling
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> or equivalent rather than
retaining unused virtual memory (see <link
<manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
linkend="stats.retained">stats.retained</link> for related details).
This option is enabled by default unless it is known to trigger
This option is disabled by default unless discarding virtual memory is
known to trigger
platform-specific performance problems, e.g. for [64-bit] Linux, which
has a quirk in its virtual memory allocation algorithm that causes
semi-permanent VM map holes under normal jemalloc operation. Although
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> causes issues on 32-bit Linux as
well, it is not disabled by default due to the practical possibility of
address space exhaustion.
well, retaining virtual memory for 32-bit Linux is disabled by default
due to the practical possibility of address space exhaustion.
</para></listitem>
</varlistentry>

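Aside, not part of the diff: the renamed mallctl is read-only ("r-"), so it can be queried but not set after startup. A minimal sketch of reading it through the documented mallctl() interface:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	bool retain;
	size_t sz = sizeof(retain);
	/* opt.retain is read-only, so newp/newlen are NULL/0. */
	if (mallctl("opt.retain", &retain, &sz, NULL, 0) == 0) {
		printf("opt.retain: %s\n", retain ? "true" : "false");
	}
	return 0;
}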
View File

@@ -230,7 +230,7 @@ struct arena_s {
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if !opt_munmap). This limits the
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.

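Aside, not part of the diff: the comment above is the heart of why retention stays cheap. The following self-contained sketch is simplified to pure doubling (jemalloc actually steps through its page size classes starting at HUGEPAGE) and assumes a 64-bit size_t; it shows that a geometrically growing request series covers a large cumulative demand with only a logarithmic number of disjoint virtual memory ranges:

#include <stdio.h>

int main(void) {
	size_t request = (size_t)2 << 20;	/* assumed 2 MiB starting size */
	size_t target = (size_t)64 << 30;	/* 64 GiB of cumulative demand */
	size_t covered = 0;
	int ranges = 0;
	while (covered < target) {
		covered += request;
		request *= 2;	/* next extent size in the growing series */
		ranges++;
	}
	printf("%d disjoint ranges cover 64 GiB\n", ranges);
	return 0;
}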
View File

@@ -1,7 +1,7 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
extern bool opt_munmap;
extern bool opt_retain;
void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);

View File

@@ -59,10 +59,9 @@ struct extent_s {
*
* sn: Serial number (potentially non-unique).
*
* Serial numbers may wrap around if JEMALLOC_MUNMAP is defined, but
* as long as comparison functions fall back on address comparison
* for equal serial numbers, stable (if imperfect) ordering is
* maintained.
* Serial numbers may wrap around if !opt_retain, but as long as
* comparison functions fall back on address comparison for equal
* serial numbers, stable (if imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of
* wrap-around, e.g. when splitting an extent and assigning the same

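Aside, not part of the diff: a sketch of the fallback ordering this comment describes, with hypothetical names rather than jemalloc's internal ones. Equal or wrapped serial numbers still produce a stable total order because the address breaks ties:

#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t sn;	/* serial number, may wrap around */
	void *addr;	/* extent base address */
} ext_t;

/* Compare by serial number first; fall back on address for equal sn. */
static int
ext_snad_cmp(const ext_t *a, const ext_t *b) {
	if (a->sn != b->sn) {
		return (a->sn < b->sn) ? -1 : 1;
	}
	uintptr_t pa = (uintptr_t)a->addr, pb = (uintptr_t)b->addr;
	return (pa > pb) - (pa < pb);
}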
View File

@@ -192,12 +192,12 @@
#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed extents by default, rather than
* storing them for later reuse. This is disabled on 64-bit Linux because
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#undef JEMALLOC_MUNMAP
#undef JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS

View File

@@ -117,10 +117,9 @@ typedef struct arena_stats_s {
atomic_zu_t mapped; /* Partially derived. */
/*
* Number of bytes currently retained as a side effect of munmap() being
* disabled/bypassed. Retained bytes are technically mapped (though
* always decommitted or purged), but they are excluded from the mapped
* statistic (above).
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
*/
atomic_zu_t retained; /* Derived. */

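Aside, not part of the diff: this counter is exposed as stats.retained. A hedged sketch of reading it, assuming a build with --enable-stats; statistics are snapshotted, so the epoch is bumped first:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	uint64_t epoch = 1;
	size_t retained, sz = sizeof(retained);
	/* Refresh the stats snapshot, then read the derived counter. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	if (mallctl("stats.retained", &retained, &sz, NULL, 0) == 0) {
		printf("retained: %zu bytes\n", retained);
	}
	return 0;
}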
View File

@@ -1143,8 +1143,8 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
* opportunity to unmap all retained memory without having to keep its
* own metadata structures, but if deallocation fails, that is the
* application's decision/problem. In practice, retained extents are
* leaked here if !opt_munmap unless the application provided custom
* extent hooks, so best practice is to either enable munmap (and avoid
* leaked here if opt_retain unless the application provided custom
* extent hooks, so best practice is to either disable retain (and avoid
* dss for arenas to be destroyed), or provide custom extent hooks that
* either unmap retained extents or track them for later use.
*/
@@ -1947,7 +1947,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
if (!opt_munmap) {
if (opt_retain) {
atomic_store_u(&arena->extent_grow_next, psz2ind(HUGEPAGE),
ATOMIC_RELAXED);
}

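Aside, not part of the diff: the best-practice note above, and the test hook near the bottom of this commit, point at the same escape hatch: a custom extent dalloc hook that really unmaps. A minimal sketch assuming the extent_hooks_t dalloc signature, with error handling elided and installation (via the "arena.<i>.extent_hooks" mallctl) not shown:

#include <stdbool.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

/* Returning false tells jemalloc the extent was deallocated, so arena
 * destruction does not leak retained extents even with opt.retain set. */
static bool
extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	(void)extent_hooks; (void)committed; (void)arena_ind;
	return munmap(addr, size) != 0;
}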
View File

@@ -74,7 +74,7 @@ CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_munmap)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
@@ -260,7 +260,7 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("munmap"), CTL(opt_munmap)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
@@ -1455,7 +1455,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_munmap, opt_munmap, bool)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, opt_percpu_arena, const char *)

View File

@@ -1123,7 +1123,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
extent_gdump_add(tsdn, extent);
}
}
if (!opt_munmap && extent == NULL) {
if (opt_retain && extent == NULL) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
}

View File

@@ -7,8 +7,8 @@
/******************************************************************************/
/* Data. */
bool opt_munmap =
#ifdef JEMALLOC_MUNMAP
bool opt_retain =
#ifdef JEMALLOC_RETAIN
true
#else
false
@@ -34,8 +34,8 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool
extent_dalloc_mmap(void *addr, size_t size) {
if (opt_munmap) {
if (!opt_retain) {
pages_unmap(addr, size);
}
return !opt_munmap;
return opt_retain;
}

View File

@@ -1043,7 +1043,7 @@ malloc_conf_init(void) {
}
CONF_HANDLE_BOOL(opt_abort, "abort")
CONF_HANDLE_BOOL(opt_munmap, "munmap")
CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;

View File

@@ -93,7 +93,7 @@ large_dalloc_maybe_junk(void *ptr, size_t size) {
* Only bother junk filling if the extent isn't about to be
* unmapped.
*/
if (!opt_munmap || (have_dss && extent_in_dss(ptr))) {
if (opt_retain || (have_dss && extent_in_dss(ptr))) {
large_dalloc_junk(ptr, size);
}
}

View File

@@ -802,7 +802,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
"Run-time option settings:\n");
}
OPT_WRITE_BOOL(abort, ",")
OPT_WRITE_BOOL(munmap, ",")
OPT_WRITE_BOOL(retain, ",")
OPT_WRITE_CHAR_P(dss, ",")
OPT_WRITE_UNSIGNED(narenas, ",")
OPT_WRITE_CHAR_P(percpu_arena, ",")

View File

@@ -251,7 +251,7 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
TEST_END
/*
* Actually unmap extents, regardless of opt_munmap, so that attempts to access
* Actually unmap extents, regardless of opt_retain, so that attempts to access
* a destroyed arena's memory will segfault.
*/
static bool

View File

@@ -157,7 +157,7 @@ TEST_BEGIN(test_mallctl_opt) {
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(bool, munmap, always);
TEST_MALLCTL_OPT(bool, retain, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(unsigned, narenas, always);
TEST_MALLCTL_OPT(const char *, percpu_arena, always);