diff --git a/configure.ac b/configure.ac
index 6447c51a..9f8311cc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -517,7 +517,7 @@ dnl
 dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
-default_munmap="1"
+default_retain="0"
 maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
@@ -557,7 +557,7 @@ case "${host}" in
 	AC_DEFINE([JEMALLOC_C11_ATOMICS])
 	force_tls="0"
 	if test "${LG_SIZEOF_PTR}" = "3"; then
-	  default_munmap="0"
+	  default_retain="1"
 	fi
 	;;
   *-*-linux* | *-*-kfreebsd*)
@@ -570,7 +570,7 @@ case "${host}" in
 	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
 	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
 	if test "${LG_SIZEOF_PTR}" = "3"; then
-	  default_munmap="0"
+	  default_retain="1"
 	fi
 	;;
   *-*-netbsd*)
@@ -1086,9 +1086,9 @@ if test "x${maps_coalesce}" = "x1" ; then
   AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
 fi
 
-dnl Indicate whether to use munmap() by default.
-if test "x$default_munmap" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MUNMAP], [ ])
+dnl Indicate whether to retain memory (rather than using munmap()) by default.
+if test "x$default_retain" = "x1" ; then
+  AC_DEFINE([JEMALLOC_RETAIN], [ ])
 fi
 
 dnl Enable allocation from DSS if supported by the OS.
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 66d8e5df..fa65c39b 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -863,25 +863,26 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
-      <varlistentry id="opt.munmap">
+      <varlistentry id="opt.retain">
         <term>
-          <mallctl>opt.munmap</mallctl>
+          <mallctl>opt.retain</mallctl>
           (<type>bool</type>)
           <literal>r-</literal>
         </term>
-        <listitem><para>If true, call
+        <listitem><para>If true, retain unused virtual memory for later reuse
+        rather than discarding it by calling
         <citerefentry><refentrytitle>munmap</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> or equivalent rather than
-        retaining unused virtual memory (see <link
+        <manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
         linkend="stats.retained">stats.retained</link> for related details).
-        This option is enabled by default unless it is known to trigger
+        This option is disabled by default unless discarding virtual memory is
+        known to trigger
         platform-specific performance problems, e.g. for [64-bit] Linux, which
         has a quirk in its virtual memory allocation algorithm that causes
         semi-permanent VM map holes under normal jemalloc operation.  Although
         <citerefentry><refentrytitle>munmap</refentrytitle>
         <manvolnum>2</manvolnum></citerefentry> causes issues on 32-bit Linux as
-        well, it is not disabled by default due to the practical possibility of
-        address space exhaustion.</para></listitem>
+        well, retaining virtual memory for 32-bit Linux is disabled by default
+        due to the practical possibility of address space exhaustion.</para></listitem>
       </varlistentry>
diff --git a/include/jemalloc/internal/arena_structs_b.h b/include/jemalloc/internal/arena_structs_b.h
index 6b83e526..d98b455e 100644
--- a/include/jemalloc/internal/arena_structs_b.h
+++ b/include/jemalloc/internal/arena_structs_b.h
@@ -230,7 +230,7 @@ struct arena_s {
 
 	/*
 	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if !opt_munmap).  This limits the
+	 * request via the extent hooks (only if opt_retain).  This limits the
 	 * number of disjoint virtual memory ranges so that extent merging can
 	 * be effective even if multiple arenas' extent allocation requests are
 	 * highly interleaved.
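The renamed control documented above can be sanity-checked at runtime. The sketch below is illustrative, not part of the patch; it assumes an unprefixed jemalloc build (a prefixed build would call je_mallctl instead):

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
    	/*
    	 * "opt.retain" is read-only; it reports the value settled on at
    	 * startup, i.e. the JEMALLOC_RETAIN default unless overridden.
    	 */
    	bool retain;
    	size_t sz = sizeof(retain);
    	if (mallctl("opt.retain", &retain, &sz, NULL, 0) != 0) {
    		fprintf(stderr, "opt.retain lookup failed\n");
    		return 1;
    	}
    	printf("opt.retain: %s\n", retain ? "true" : "false");
    	return 0;
    }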
diff --git a/include/jemalloc/internal/extent_mmap_externs.h b/include/jemalloc/internal/extent_mmap_externs.h
index e5bc8110..fe9a79ac 100644
--- a/include/jemalloc/internal/extent_mmap_externs.h
+++ b/include/jemalloc/internal/extent_mmap_externs.h
@@ -1,7 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
 #define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
 
-extern bool opt_munmap;
+extern bool opt_retain;
 
 void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit);
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index 5d8c3a20..62bae39a 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -59,10 +59,9 @@ struct extent_s {
 	 *
 	 * sn: Serial number (potentially non-unique).
 	 *
-	 * Serial numbers may wrap around if JEMALLOC_MUNMAP is defined, but
-	 * as long as comparison functions fall back on address comparison
-	 * for equal serial numbers, stable (if imperfect) ordering is
-	 * maintained.
+	 * Serial numbers may wrap around if !opt_retain, but as long as
+	 * comparison functions fall back on address comparison for equal
+	 * serial numbers, stable (if imperfect) ordering is maintained.
 	 *
 	 * Serial numbers may not be unique even in the absence of
 	 * wrap-around, e.g. when splitting an extent and assigning the same
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 8f7c42b8..bccee167 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -192,12 +192,12 @@
 #undef JEMALLOC_MAPS_COALESCE
 
 /*
- * If defined, use munmap() to unmap freed extents by default, rather than
- * storing them for later reuse.  This is disabled on 64-bit Linux because
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
  * common sequences of mmap()/munmap() calls will cause virtual memory map
  * holes.
  */
-#undef JEMALLOC_MUNMAP
+#undef JEMALLOC_RETAIN
 
 /* TLS is used to map arenas and magazine caches to threads. */
 #undef JEMALLOC_TLS
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 301a50ab..fd98422d 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -117,10 +117,9 @@ typedef struct arena_stats_s {
 	atomic_zu_t	mapped; /* Partially derived. */
 
 	/*
-	 * Number of bytes currently retained as a side effect of munmap() being
-	 * disabled/bypassed.  Retained bytes are technically mapped (though
-	 * always decommitted or purged), but they are excluded from the mapped
-	 * statistic (above).
+	 * Number of unused virtual memory bytes currently retained.  Retained
+	 * bytes are technically mapped (though always decommitted or purged),
+	 * but they are excluded from the mapped statistic (above).
 	 */
 	atomic_zu_t	retained; /* Derived. */
 
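Since the retained statistic keeps its name, existing consumers are unaffected. For illustration (again not part of the patch), reading it follows the usual epoch-then-read mallctl pattern:

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
    	/* Advance the epoch so the statistics snapshot is current. */
    	uint64_t epoch = 1;
    	size_t sz = sizeof(epoch);
    	mallctl("epoch", &epoch, &sz, &epoch, sz);

    	/*
    	 * Bytes retained for reuse: technically mapped, but decommitted
    	 * or purged, and excluded from stats.mapped per the comment above.
    	 */
    	size_t retained;
    	sz = sizeof(retained);
    	if (mallctl("stats.retained", &retained, &sz, NULL, 0) != 0) {
    		return 1;
    	}
    	printf("stats.retained: %zu bytes\n", retained);
    	return 0;
    }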
diff --git a/src/arena.c b/src/arena.c
index 3b94a20d..2c7cea08 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1143,8 +1143,8 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 	 * opportunity to unmap all retained memory without having to keep its
 	 * own metadata structures, but if deallocation fails, that is the
 	 * application's decision/problem.  In practice, retained extents are
-	 * leaked here if !opt_munmap unless the application provided custom
-	 * extent hooks, so best practice is to either enable munmap (and avoid
+	 * leaked here if opt_retain unless the application provided custom
+	 * extent hooks, so best practice is to either disable retain (and avoid
 	 * dss for arenas to be destroyed), or provide custom extent hooks that
 	 * either unmap retained extents or track them for later use.
 	 */
@@ -1947,7 +1947,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		goto label_error;
 	}
 
-	if (!opt_munmap) {
+	if (opt_retain) {
 		atomic_store_u(&arena->extent_grow_next, psz2ind(HUGEPAGE),
 		    ATOMIC_RELAXED);
 	}
diff --git a/src/ctl.c b/src/ctl.c
index 3591f891..7d53a336 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -74,7 +74,7 @@ CTL_PROTO(config_stats)
 CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
-CTL_PROTO(opt_munmap)
+CTL_PROTO(opt_retain)
 CTL_PROTO(opt_dss)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_percpu_arena)
@@ -260,7 +260,7 @@ static const ctl_named_node_t config_node[] = {
 
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),		CTL(opt_abort)},
-	{NAME("munmap"),	CTL(opt_munmap)},
+	{NAME("retain"),	CTL(opt_retain)},
 	{NAME("dss"),		CTL(opt_dss)},
 	{NAME("narenas"),	CTL(opt_narenas)},
 	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
@@ -1455,7 +1455,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 /******************************************************************************/
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_munmap, opt_munmap, bool)
+CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
 CTL_RO_NL_GEN(opt_percpu_arena, opt_percpu_arena, const char *)
diff --git a/src/extent.c b/src/extent.c
index 1ddaf240..bc17711c 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -1123,7 +1123,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
 			extent_gdump_add(tsdn, extent);
 		}
 	}
-	if (!opt_munmap && extent == NULL) {
+	if (opt_retain && extent == NULL) {
 		extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
 	}
diff --git a/src/extent_mmap.c b/src/extent_mmap.c
index 5fe82ee5..3e4e1ef7 100644
--- a/src/extent_mmap.c
+++ b/src/extent_mmap.c
@@ -7,8 +7,8 @@
 /******************************************************************************/
 /* Data. */
 
-bool	opt_munmap =
-#ifdef JEMALLOC_MUNMAP
+bool	opt_retain =
+#ifdef JEMALLOC_RETAIN
     true
 #else
     false
@@ -34,8 +34,8 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
 
 bool
 extent_dalloc_mmap(void *addr, size_t size) {
-	if (opt_munmap) {
+	if (!opt_retain) {
 		pages_unmap(addr, size);
 	}
-	return !opt_munmap;
+	return opt_retain;
 }
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 42146004..97a64431 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1043,7 +1043,7 @@ malloc_conf_init(void) {
 			}
 
 			CONF_HANDLE_BOOL(opt_abort, "abort")
-			CONF_HANDLE_BOOL(opt_munmap, "munmap")
+			CONF_HANDLE_BOOL(opt_retain, "retain")
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
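The CONF_HANDLE_BOOL change above means the option string changes spelling along with its sense: configurations that previously set munmap:false now set retain:true, and vice versa. A minimal override, assuming an unprefixed build, could look like:

    #include <jemalloc/jemalloc.h>

    /*
     * Parsed by malloc_conf_init() at startup, alongside the MALLOC_CONF
     * environment variable; "retain:true" here replaces the old
     * "munmap:false" spelling.
     */
    const char *malloc_conf = "retain:true";

Equivalently, setting MALLOC_CONF=retain:true in the environment selects the same behavior without recompiling.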
diff --git a/src/large.c b/src/large.c
index 4d515fbb..f657ccbe 100644
--- a/src/large.c
+++ b/src/large.c
@@ -93,7 +93,7 @@ large_dalloc_maybe_junk(void *ptr, size_t size) {
 		 * Only bother junk filling if the extent isn't about to be
 		 * unmapped.
 		 */
-		if (!opt_munmap || (have_dss && extent_in_dss(ptr))) {
+		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
 			large_dalloc_junk(ptr, size);
 		}
 	}
diff --git a/src/stats.c b/src/stats.c
index 5d515186..34fc37f2 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -802,7 +802,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		    "Run-time option settings:\n");
 	}
 	OPT_WRITE_BOOL(abort, ",")
-	OPT_WRITE_BOOL(munmap, ",")
+	OPT_WRITE_BOOL(retain, ",")
 	OPT_WRITE_CHAR_P(dss, ",")
 	OPT_WRITE_UNSIGNED(narenas, ",")
 	OPT_WRITE_CHAR_P(percpu_arena, ",")
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index 0fa240b7..5d6c1a77 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -251,7 +251,7 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
 TEST_END
 
 /*
- * Actually unmap extents, regardless of opt_munmap, so that attempts to access
+ * Actually unmap extents, regardless of opt_retain, so that attempts to access
  * a destroyed arena's memory will segfault.
  */
 static bool
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 51a5244e..b07a6d04 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -157,7 +157,7 @@ TEST_BEGIN(test_mallctl_opt) {
 	} while (0)
 
 	TEST_MALLCTL_OPT(bool, abort, always);
-	TEST_MALLCTL_OPT(bool, munmap, always);
+	TEST_MALLCTL_OPT(bool, retain, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
 	TEST_MALLCTL_OPT(unsigned, narenas, always);
 	TEST_MALLCTL_OPT(const char *, percpu_arena, always);
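The arena_reset.c test above replaces the default hooks so that extents are really unmapped. An application-side dalloc hook in the same spirit, sketched below with a hypothetical name, would be combined with the remaining default hooks and installed via the arena.<i>.extent_hooks mallctl; it is an illustration of the hook contract, not code from this patch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <jemalloc/jemalloc.h>

    /*
     * Hypothetical extent_dalloc hook: always give the pages back to the
     * OS, regardless of opt.retain.  Returning false tells jemalloc the
     * extent was deallocated; returning true asks it to retain the range.
     */
    static bool
    extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
        bool committed, unsigned arena_ind) {
    	(void)extent_hooks;
    	(void)committed;
    	(void)arena_ind;
    	/* munmap() returns 0 on success, so success maps to false. */
    	return munmap(addr, size) != 0;
    }

With hooks like this, arena.<i>.destroy can release all of a destroyed arena's address space even when opt.retain is enabled, which is exactly the escape hatch the arena.c comment above recommends.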