Implement support for non-coalescing maps on MinGW.
- Do not reallocate huge objects in place if the number of backing chunks
  would change.
- Do not cache multi-chunk mappings.

This resolves #213.

parent 40cbd30d50, commit d059b9d6a1

INSTALL | 5
@@ -150,7 +150,10 @@ any of the following arguments (not a definitive list) to 'configure':
     the virtual memory for later use. munmap() is disabled by default (i.e.
     --disable-munmap is implied) on Linux, which has a quirk in its virtual
     memory allocation algorithm that causes semi-permanent VM map holes under
-    normal jemalloc operation.
+    normal jemalloc operation. Conversely, munmap() (actually VirtualFree()) is
+    forcefully enabled on MinGW because virtual memory mappings do not
+    automatically coalesce (nor fragment on demand), and extra bookkeeping
+    would be required to track mapping boundaries.
 
 --disable-fill
     Disable support for junk/zero filling of memory, quarantine, and redzones.
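
Note on the INSTALL text above: the following minimal sketch (illustrative only, not part of jemalloc) shows the Windows behavior being described. VirtualFree() with MEM_RELEASE must be given the exact base address returned by VirtualAlloc() and a size of zero, so a mapping can only be returned to the OS as a whole; munmap(), by contrast, may unmap any page-aligned subrange.

/*
 * Illustrative sketch: Windows mappings do not fragment on demand.
 */
#include <windows.h>
#include <stdio.h>

int
main(void)
{
	size_t sz = 2 * 1024 * 1024;
	char *p = VirtualAlloc(NULL, sz, MEM_RESERVE | MEM_COMMIT,
	    PAGE_READWRITE);
	if (p == NULL)
		return (1);

	/* Releasing only half the mapping fails: no fragmentation on demand. */
	if (!VirtualFree(p + sz / 2, 0, MEM_RELEASE))
		printf("cannot release a subrange (error %lu)\n",
		    (unsigned long)GetLastError());

	/* The whole mapping must be released from its original base address. */
	VirtualFree(p, 0, MEM_RELEASE);
	return (0);
}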

configure.ac | 12
@@ -258,6 +258,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
 default_munmap="1"
+maps_coalesce="1"
 case "${host}" in
   *-*-darwin* | *-*-ios*)
 	CFLAGS="$CFLAGS"

@@ -341,6 +342,7 @@ case "${host}" in
 	abi="pecoff"
 	force_tls="0"
 	force_lazy_lock="1"
+	maps_coalesce="0"
 	RPATH=""
 	so="dll"
 	if test "x$je_cv_msvc" = "xyes" ; then

@@ -862,6 +864,12 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
 
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
 [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],

@@ -873,6 +881,10 @@ fi
 ],
 [enable_munmap="${default_munmap}"]
 )
+if test "x$enable_munmap" = "x0" -a "x${maps_coalesce}" = "x0" ; then
+  AC_MSG_RESULT([Forcing munmap to avoid non-coalescing map issues])
+  enable_munmap="1"
+fi
 if test "x$enable_munmap" = "x1" ; then
   AC_DEFINE([JEMALLOC_MUNMAP], [ ])
 fi

@@ -70,6 +70,13 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
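
Note: maps_coalesce follows jemalloc's existing pattern of exposing configure-time macros as compile-time constant booleans, so callers can branch with ordinary if statements and the optimizer drops the untaken branch. A small standalone sketch of that pattern (illustrative, not jemalloc code):

#include <stdbool.h>
#include <stdio.h>

/* The build system may or may not define JEMALLOC_MAPS_COALESCE. */
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;

int
main(void)
{
	if (maps_coalesce)
		printf("mappings coalesce; subranges can be unmapped freely\n");
	else
		printf("mappings do not coalesce; unmap whole mappings only\n");
	return (0);
}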

@@ -169,6 +169,15 @@
 /* One page is 2^LG_PAGE bytes. */
 #undef LG_PAGE
 
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
 /*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
  * later reuse. This is disabled by default on Linux because common sequences

@@ -337,6 +337,7 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
 	extent_node_t *node, *prev;
 	extent_node_t key;
 
+	assert(maps_coalesce || size == chunksize);
 	assert(!cache || !zeroed);
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

@@ -421,6 +422,11 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
+	if (!maps_coalesce && size != chunksize) {
+		chunk_dalloc_arena(arena, chunk, size, false);
+		return;
+	}
+
 	chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
 	    true, chunk, size, false);
 	arena_maybe_purge(arena);
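
Note: taken together, the two hunks above implement a simple policy: when maps cannot coalesce, only chunksize-sized regions are eligible for the reuse cache, and larger regions are handed straight back to the OS so no cached mapping ever needs to be split later. A condensed, hypothetical sketch of that policy (the names and the 4 MiB chunk size are assumptions for illustration, not jemalloc internals):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNKSIZE	((size_t)4 << 20)	/* assumed chunk size */

static const bool maps_coalesce = false;	/* e.g. MinGW */

static void
dalloc_chunk(void *chunk, size_t size)
{
	(void)chunk;
	if (!maps_coalesce && size != CHUNKSIZE) {
		printf("%zu bytes: unmap immediately (multi-chunk region)\n",
		    size);
		return;
	}
	printf("%zu bytes: cache for later reuse\n", size);
}

int
main(void)
{
	dalloc_chunk(NULL, CHUNKSIZE);
	dalloc_chunk(NULL, 3 * CHUNKSIZE);
	return (0);
}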

@@ -304,6 +304,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 		return (false);
 	}
 
+	if (!maps_coalesce)
+		return (true);
+
 	/* Shrink the allocation in-place. */
 	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
 		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
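
Note: "the number of backing chunks would change" refers to CHUNK_CEILING(size) / chunksize, the count of chunks backing a huge object. If a resize would change that count, the mapping itself would have to grow or split, which non-coalescing maps cannot do, so the in-place path refuses. A worked example (illustrative only; the 4 MiB chunk size is an assumption, jemalloc's actual chunk size is configurable):

#include <stddef.h>
#include <stdio.h>

#define CHUNKSIZE	((size_t)4 << 20)	/* assumed chunk size */
#define CHUNK_CEILING(s)	(((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

int
main(void)
{
	size_t oldsize = (size_t)5 << 20;	/* 5 MiB -> 2 backing chunks */
	size_t usize1 = (size_t)7 << 20;	/* 7 MiB -> 2 backing chunks */
	size_t usize2 = (size_t)9 << 20;	/* 9 MiB -> 3 backing chunks */

	printf("oldsize: %zu chunks\n", CHUNK_CEILING(oldsize) / CHUNKSIZE);
	printf("usize1:  %zu chunks (same count)\n",
	    CHUNK_CEILING(usize1) / CHUNKSIZE);
	printf("usize2:  %zu chunks (count changes)\n",
	    CHUNK_CEILING(usize2) / CHUNKSIZE);
	return (0);
}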

@@ -63,9 +63,9 @@ TEST_BEGIN(test_chunk)
 	    "Unexpected arenas.hchunk.2.size failure");
 	if (huge0 * 2 > huge2) {
 		/*
-		 * There are at least four size classes per doubling, so
-		 * xallocx() from size=huge2 to size=huge1 is guaranteed to
-		 * leave trailing purgeable memory.
+		 * There are at least four size classes per doubling, so a
+		 * successful xallocx() from size=huge2 to size=huge1 is
+		 * guaranteed to leave trailing purgeable memory.
 		 */
 		p = mallocx(huge2, 0);
 		assert_ptr_not_null(p, "Unexpected mallocx() error");