Implement support for non-coalescing maps on MinGW.

- Do not reallocate huge objects in place if the number of backing
  chunks would change.
- Do not cache multi-chunk mappings.

This resolves #213.
Jason Evans 2015-07-24 18:21:42 -07:00
parent 40cbd30d50
commit d059b9d6a1
7 changed files with 44 additions and 4 deletions
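
For background, the Win32 constraint that motivates all of the changes below can be demonstrated directly. The following is an illustrative sketch, not part of the commit; it assumes a Windows toolchain (e.g. MinGW), and the region size is arbitrary:

    #include <stdio.h>
    #include <windows.h>

    int
    main(void)
    {
        size_t sz = 2 * 1024 * 1024; /* Arbitrary region size. */
        char *p = VirtualAlloc(NULL, sz, MEM_RESERVE | MEM_COMMIT,
            PAGE_READWRITE);

        if (p == NULL)
            return (1);
        /*
         * MEM_RELEASE accepts only the exact base address returned by
         * VirtualAlloc(), with a size of 0; a subrange cannot be
         * released, i.e. mappings do not fragment on demand.
         */
        if (!VirtualFree(p + sz / 2, 0, MEM_RELEASE))
            printf("subrange release failed (error %lu)\n", GetLastError());
        /* The region can only be released whole, at its base address. */
        if (VirtualFree(p, 0, MEM_RELEASE))
            printf("whole-region release succeeded\n");
        return (0);
    }

Because a reserved region can neither be split nor merged with its neighbors, the commit forces munmap()-style deallocation on MinGW and teaches the chunk and huge-allocation paths to avoid operations that would require splitting or merging mappings.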

INSTALL

@@ -150,7 +150,10 @@ any of the following arguments (not a definitive list) to 'configure':
     the virtual memory for later use.  munmap() is disabled by default (i.e.
     --disable-munmap is implied) on Linux, which has a quirk in its virtual
     memory allocation algorithm that causes semi-permanent VM map holes under
-    normal jemalloc operation.
+    normal jemalloc operation.  Conversely, munmap() (actually VirtualFree()) is
+    forcefully enabled on MinGW because virtual memory mappings do not
+    automatically coalesce (nor fragment on demand), and extra bookkeeping
+    would be required to track mapping boundaries.
 
 --disable-fill
     Disable support for junk/zero filling of memory, quarantine, and redzones.

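To confirm which behavior a given build ended up with, the compile-time setting can be read back through the mallctl interface. A minimal sketch, assuming the standard jemalloc header installation path:

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        bool munmap_enabled;
        size_t sz = sizeof(munmap_enabled);

        /* "config.munmap" mirrors the --enable-munmap configure result. */
        if (mallctl("config.munmap", &munmap_enabled, &sz, NULL, 0) != 0)
            return (1);
        printf("munmap: %s\n", munmap_enabled ? "enabled" : "disabled");
        return (0);
    }
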
configure.ac

@@ -258,6 +258,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
 default_munmap="1"
+maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
     CFLAGS="$CFLAGS"
@@ -341,6 +342,7 @@ case "${host}" in
     abi="pecoff"
     force_tls="0"
     force_lazy_lock="1"
+    maps_coalesce="0"
     RPATH=""
     so="dll"
     if test "x$je_cv_msvc" = "xyes" ; then
@@ -862,6 +864,12 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
 
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
   [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -873,6 +881,10 @@ fi
 ],
 [enable_munmap="${default_munmap}"]
 )
+if test "x$enable_munmap" = "x0" -a "x${maps_coalesce}" = "x0" ; then
+  AC_MSG_RESULT([Forcing munmap to avoid non-coalescing map issues])
+  enable_munmap="1"
+fi
 if test "x$enable_munmap" = "x1" ; then
   AC_DEFINE([JEMALLOC_MUNMAP], [ ])
 fi

include/jemalloc/internal/jemalloc_internal.h.in

@@ -70,6 +70,13 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true

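The hunk above follows jemalloc's existing config_* idiom: exposing the macro as a static const bool keeps both arms of every maps_coalesce branch visible to the compiler in all configurations, while the dead arm is optimized away. A generic sketch of the idiom, using a hypothetical MY_FEATURE macro in place of JEMALLOC_MAPS_COALESCE:

    #include <stdbool.h>
    #include <stdio.h>

    static const bool my_feature =
    #ifdef MY_FEATURE
        true
    #else
        false
    #endif
        ;

    int
    main(void)
    {
        /*
         * Unlike an #ifdef around the whole statement, both arms are
         * compiled and type-checked in every configuration; the
         * optimizer drops the arm the compile-time constant rules out.
         */
        if (my_feature)
            printf("feature compiled in\n");
        else
            printf("feature compiled out\n");
        return (0);
    }
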
include/jemalloc/internal/jemalloc_internal_defs.h.in

@@ -169,6 +169,15 @@
 /* One page is 2^LG_PAGE bytes. */
 #undef LG_PAGE
 
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
 /*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
  * later reuse.  This is disabled by default on Linux because common sequences

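For contrast with the comment above: POSIX mappings really do fragment on demand, so unmapping a subrange of an mmap()ed region is legal and leaves the remainder mapped. An illustrative sketch (POSIX-specific, assuming MAP_ANONYMOUS is available):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return (1);
        /* Unmap only the second page; the mapping fragments. */
        if (munmap(p + page, page) != 0)
            return (1);
        p[0] = 'x'; /* The first page is still mapped and usable. */
        printf("tail page unmapped, head page intact\n");
        munmap(p, page);
        return (0);
    }
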
src/chunk.c

@@ -337,6 +337,7 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
     extent_node_t *node, *prev;
     extent_node_t key;
 
+    assert(maps_coalesce || size == chunksize);
     assert(!cache || !zeroed);
     unzeroed = cache || !zeroed;
     JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -421,6 +422,11 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
 
+    if (!maps_coalesce && size != chunksize) {
+        chunk_dalloc_arena(arena, chunk, size, false);
+        return;
+    }
+
     chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
         true, chunk, size, false);
     arena_maybe_purge(arena);

src/huge.c

@@ -304,6 +304,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
         return (false);
     }
 
+    if (!maps_coalesce)
+        return (true);
+
     /* Shrink the allocation in-place. */
     if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
         huge_ralloc_no_move_shrink(ptr, oldsize, usize);

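The caller-visible effect of the early return above is benign: xallocx() resizes in place or not at all, and reports the allocation's resulting real size, so a refused shrink simply leaves the returned size at the old value and the caller can fall back to a copying reallocation. An illustrative sketch; the sizes are arbitrary stand-ins for huge-class values:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        size_t oldsize = 8 << 20; /* Assumed huge-class sizes. */
        size_t newsize = 4 << 20;
        void *p = mallocx(oldsize, 0);
        size_t rsize;

        if (p == NULL)
            return (1);
        /*
         * On a non-coalescing platform, an in-place shrink that would
         * change the number of backing chunks is refused, so rsize
         * remains the old real size rather than dropping to newsize.
         */
        rsize = xallocx(p, newsize, 0, 0);
        printf("requested %zu, allocation is now %zu\n", newsize, rsize);
        dallocx(p, 0);
        return (0);
    }
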
test/integration/chunk.c

@@ -63,9 +63,9 @@ TEST_BEGIN(test_chunk)
         "Unexpected arenas.hchunk.2.size failure");
     if (huge0 * 2 > huge2) {
         /*
-         * There are at least four size classes per doubling, so
-         * xallocx() from size=huge2 to size=huge1 is guaranteed to
-         * leave trailing purgeable memory.
+         * There are at least four size classes per doubling, so a
+         * successful xallocx() from size=huge2 to size=huge1 is
+         * guaranteed to leave trailing purgeable memory.
          */
         p = mallocx(huge2, 0);
         assert_ptr_not_null(p, "Unexpected mallocx() error");